1/*	$NetBSD: aic79xx.c,v 1.44 2009/09/03 14:52:22 tsutsui Exp $	*/
2
3/*
4 * Core routines and tables shareable across OS platforms.
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2000-2003 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions, and the following disclaimer,
15 *    without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 *    substantially similar to the "NO WARRANTY" disclaimer below
18 *    ("Disclaimer") and any redistribution must be conditioned upon
19 *    including a substantially similar Disclaimer requirement for further
20 *    binary redistribution.
21 * 3. Neither the names of the above-listed copyright holders nor the names
22 *    of any contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * Alternatively, this software may be distributed under the terms of the
26 * GNU General Public License ("GPL") version 2 as published by the Free
27 * Software Foundation.
28 *
29 * NO WARRANTY
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
34 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
38 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
39 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGES.
41 *
42 * Id: //depot/aic7xxx/aic7xxx/aic79xx.c#202 $
43 *
44 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx.c,v 1.24 2003/06/28 04:46:54 gibbs Exp $
45 */
46/*
47 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
48 * - April 2003
49 */
50
51#include <sys/cdefs.h>
52__KERNEL_RCSID(0, "$NetBSD: aic79xx.c,v 1.44 2009/09/03 14:52:22 tsutsui Exp $");
53
54#include <dev/ic/aic79xx_osm.h>
55#include <dev/ic/aic79xx_inline.h>
56#include <dev/ic/aic7xxx_cam.h>
57
58#include <dev/microcode/aic7xxx/aicasm.h>
59#include <dev/microcode/aic7xxx/aicasm_insformat.h>
60
61
62/******************************** Globals *************************************/
/* Tail queue of all ahd_softc instances managed by this driver. */
struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
64
65/***************************** Lookup Tables **********************************/
/* Printable names for the supported controller chip types. */
const char *ahd_chip_names[] =
{
	"NONE",
	"aic7901",
	"aic7902",
	"aic7901A"
};
73
74/*
75 * Hardware error codes.
76 */
77struct ahd_hard_error_entry {
78	uint8_t errno;
79	const char *errmesg;
80};
81
82static struct ahd_hard_error_entry ahd_hard_errors[] = {
83	{ DSCTMOUT,	"Discard Timer has timed out" },
84	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
85	{ SQPARERR,	"Sequencer Parity Error" },
86	{ DPARERR,	"Data-path Parity Error" },
87	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
88	{ CIOPARERR,	"CIOBUS Parity Error" },
89};
90static const u_int num_errors = NUM_ELEMENTS(ahd_hard_errors);
91
/*
 * Per-phase lookup table: phase code, message to send on an error
 * detected in that phase, and a string used in diagnostics.  The
 * final entry is a catch-all for unknown phases.
 */
static struct ahd_phase_table_entry ahd_phase_table[] =
{
	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	MSG_NOOP,		"in DT Data-out phase"	},
	{ P_DATAIN_DT,	MSG_INITIATOR_DET_ERR,	"in DT Data-in phase"	},
	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
	{ 0,		MSG_NOOP,		"in unknown phase"	}
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last (catch-all) element from the count.
 */
static const u_int num_phases = NUM_ELEMENTS(ahd_phase_table) - 1;
111
112/* Our Sequencer Program */
113#include <dev/microcode/aic7xxx/aic79xx_seq.h>
114
115/**************************** Function Declarations ***************************/
116static void		ahd_handle_transmission_error(struct ahd_softc *ahd);
117static void		ahd_handle_lqiphase_error(struct ahd_softc *ahd,
118						  u_int lqistat1);
119static int		ahd_handle_pkt_busfree(struct ahd_softc *ahd,
120					       u_int busfreetime);
121static int		ahd_handle_nonpkt_busfree(struct ahd_softc *ahd);
122static void		ahd_handle_proto_violation(struct ahd_softc *ahd);
123static void		ahd_force_renegotiation(struct ahd_softc *ahd,
124						struct ahd_devinfo *devinfo);
125
126static struct ahd_tmode_tstate*
127			ahd_alloc_tstate(struct ahd_softc *ahd,
128					 u_int scsi_id, char channel);
129#ifdef AHD_TARGET_MODE
130static void		ahd_free_tstate(struct ahd_softc *ahd,
131					u_int scsi_id, char channel, int force);
132#endif
133static void		ahd_devlimited_syncrate(struct ahd_softc *ahd,
134					        struct ahd_initiator_tinfo *,
135						u_int *period,
136						u_int *ppr_options,
137						role_t role);
138static void		ahd_update_neg_table(struct ahd_softc *ahd,
139					     struct ahd_devinfo *devinfo,
140					     struct ahd_transinfo *tinfo);
141static void		ahd_update_pending_scbs(struct ahd_softc *ahd);
142static void		ahd_fetch_devinfo(struct ahd_softc *ahd,
143					  struct ahd_devinfo *devinfo);
144static void		ahd_scb_devinfo(struct ahd_softc *ahd,
145					struct ahd_devinfo *devinfo,
146					struct scb *scb);
147static void		ahd_setup_initiator_msgout(struct ahd_softc *ahd,
148						   struct ahd_devinfo *devinfo,
149						   struct scb *scb);
150static void		ahd_build_transfer_msg(struct ahd_softc *ahd,
151					       struct ahd_devinfo *devinfo);
152static void		ahd_construct_sdtr(struct ahd_softc *ahd,
153					   struct ahd_devinfo *devinfo,
154					   u_int period, u_int offset);
155static void		ahd_construct_wdtr(struct ahd_softc *ahd,
156					   struct ahd_devinfo *devinfo,
157					   u_int bus_width);
158static void		ahd_construct_ppr(struct ahd_softc *ahd,
159					  struct ahd_devinfo *devinfo,
160					  u_int period, u_int offset,
161					  u_int bus_width, u_int ppr_options);
162static void		ahd_clear_msg_state(struct ahd_softc *ahd);
163static void		ahd_handle_message_phase(struct ahd_softc *ahd);
164typedef enum {
165	AHDMSG_1B,
166	AHDMSG_2B,
167	AHDMSG_EXT
168} ahd_msgtype;
169static int		ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type,
170				     u_int msgval, int full);
171static int		ahd_parse_msg(struct ahd_softc *ahd,
172				      struct ahd_devinfo *devinfo);
173static int		ahd_handle_msg_reject(struct ahd_softc *ahd,
174					      struct ahd_devinfo *devinfo);
175static void		ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
176						struct ahd_devinfo *devinfo);
177static void		ahd_reinitialize_dataptrs(struct ahd_softc *ahd);
178static void		ahd_handle_devreset(struct ahd_softc *ahd,
179					    struct ahd_devinfo *devinfo,
180					    u_int lun, cam_status status,
181					    const char *message,
182					    int verbose_level);
/*
 * Use #ifdef (not #if) to match the other AHD_TARGET_MODE guards in
 * this file; #if fails to preprocess if the macro is defined empty.
 */
#ifdef AHD_TARGET_MODE
static void		ahd_setup_target_msgin(struct ahd_softc *ahd,
					       struct ahd_devinfo *devinfo,
					       struct scb *scb);
#endif
188
189static u_int		ahd_sglist_size(struct ahd_softc *ahd);
190static u_int		ahd_sglist_allocsize(struct ahd_softc *ahd);
191static void		ahd_initialize_hscbs(struct ahd_softc *ahd);
192static int		ahd_init_scbdata(struct ahd_softc *ahd);
193static void		ahd_fini_scbdata(struct ahd_softc *ahd);
194static void		ahd_setup_iocell_workaround(struct ahd_softc *ahd);
195static void		ahd_iocell_first_selection(struct ahd_softc *ahd);
196static void		ahd_add_col_list(struct ahd_softc *ahd,
197					 struct scb *scb, u_int col_idx);
198static void		ahd_rem_col_list(struct ahd_softc *ahd,
199					 struct scb *scb);
200static void		ahd_chip_init(struct ahd_softc *ahd);
201static void		ahd_qinfifo_requeue(struct ahd_softc *ahd,
202					    struct scb *prev_scb,
203					    struct scb *scb);
204static int		ahd_qinfifo_count(struct ahd_softc *ahd);
205static int		ahd_search_scb_list(struct ahd_softc *ahd, int target,
206					    char channel, int lun, u_int tag,
207					    role_t role, uint32_t status,
208					    ahd_search_action action,
209					    u_int *list_head, u_int tid);
210static void		ahd_stitch_tid_list(struct ahd_softc *ahd,
211					    u_int tid_prev, u_int tid_cur,
212					    u_int tid_next);
213static void		ahd_add_scb_to_free_list(struct ahd_softc *ahd,
214						 u_int scbid);
215static u_int		ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
216				     u_int prev, u_int next, u_int tid);
217static void		ahd_reset_current_bus(struct ahd_softc *ahd);
218static ahd_callback_t	ahd_reset_poll;
219static ahd_callback_t	ahd_stat_timer;
220#ifdef AHD_DUMP_SEQ
221static void		ahd_dumpseq(struct ahd_softc *ahd);
222#endif
223static void		ahd_loadseq(struct ahd_softc *ahd);
224static int		ahd_check_patch(struct ahd_softc *ahd,
225					struct patch **start_patch,
226					u_int start_instr, u_int *skip_addr);
227static u_int		ahd_resolve_seqaddr(struct ahd_softc *ahd,
228					    u_int address);
229static void		ahd_download_instr(struct ahd_softc *ahd,
230					   u_int instrptr, uint8_t *dconsts);
231static int		ahd_probe_stack_size(struct ahd_softc *ahd);
232static int		ahd_scb_active_in_fifo(struct ahd_softc *ahd,
233					       struct scb *scb);
234static void		ahd_run_data_fifo(struct ahd_softc *ahd,
235					  struct scb *scb);
236
237#ifdef AHD_TARGET_MODE
238static void		ahd_queue_lstate_event(struct ahd_softc *ahd,
239					       struct ahd_tmode_lstate *lstate,
240					       u_int initiator_id,
241					       u_int event_type,
242					       u_int event_arg);
243static void		ahd_update_scsiid(struct ahd_softc *ahd,
244					  u_int targid_mask);
245static int		ahd_handle_target_cmd(struct ahd_softc *ahd,
246					      struct target_cmd *cmd);
247#endif
248
249/************************** Added for porting to NetBSD ***********************/
250static int ahd_createdmamem(bus_dma_tag_t tag,
251			    int size,
252			    int flags,
253			    bus_dmamap_t *mapp,
254			    void **vaddr,
255			    bus_addr_t *baddr,
256			    bus_dma_segment_t *seg,
257			    int *nseg,
258			    const char *myname, const char *what);
259
260static void ahd_freedmamem(bus_dma_tag_t tag,
261			   int size,
262			   bus_dmamap_t map,
263			   void *vaddr,
264			   bus_dma_segment_t *seg,
265			   int nseg);
266
267/******************************** Private Inlines *****************************/
268static inline void	ahd_assert_atn(struct ahd_softc *ahd);
269static inline int	ahd_currently_packetized(struct ahd_softc *ahd);
270static inline int	ahd_set_active_fifo(struct ahd_softc *ahd);
271
/*
 * Assert the ATN signal on the SCSI bus by writing ATNO to SCSISIGO.
 */
static inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}
277
278/*
279 * Determine if the current connection has a packetized
280 * agreement.  This does not necessarily mean that we
281 * are currently in a packetized transfer.  We could
282 * just as easily be sending or receiving a message.
283 */
284static inline int
285ahd_currently_packetized(struct ahd_softc *ahd)
286{
287	ahd_mode_state	 saved_modes;
288	int		 packetized;
289
290	saved_modes = ahd_save_modes(ahd);
291	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
292		/*
293		 * The packetized bit refers to the last
294		 * connection, not the current one.  Check
295		 * for non-zero LQISTATE instead.
296		 */
297		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
298		packetized = ahd_inb(ahd, LQISTATE) != 0;
299	} else {
300		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
301		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
302	}
303	ahd_restore_modes(ahd, saved_modes);
304	return (packetized);
305}
306
307static inline int
308ahd_set_active_fifo(struct ahd_softc *ahd)
309{
310	u_int active_fifo;
311
312	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
313	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
314	switch (active_fifo) {
315	case 0:
316	case 1:
317		ahd_set_modes(ahd, active_fifo, active_fifo);
318		return (1);
319	default:
320		return (0);
321	}
322}
323
324/************************* Sequencer Execution Control ************************/
325/*
326 * Restart the sequencer program from address zero
327 */
void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	/* Clear out any stale nexus bookkeeping. */
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	/* Reset the sequencer program counter and let it run. */
	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}
363
/*
 * Reset the data FIFO selected by 'fifo' and clear its S/G state,
 * restoring the caller's mode pointers when done.
 */
void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state	 saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printf("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	/* The FIFO number doubles as the mode selector value. */
	ahd_set_modes(ahd, fifo, fifo);
	/* Reset the channel and clear the transfer count. */
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	/* Abort any S/G fetch still in flight. */
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	/* Remove any firmware handler installed for this FIFO. */
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}
382
383/************************* Input/Output Queues ********************************/
384/*
385 * Flush and completed commands that are sitting in the command
386 * complete queues down on the chip but have yet to be DMA'ed back up.
387 */
388void
389ahd_flush_qoutfifo(struct ahd_softc *ahd)
390{
391	struct		scb *scb;
392	ahd_mode_state	saved_modes;
393	u_int		saved_scbptr;
394	u_int		ccscbctl;
395	u_int		scbid;
396	u_int		next_scbid;
397
398	saved_modes = ahd_save_modes(ahd);
399
400	/*
401	 * Complete any SCBs that just finished being
402	 * DMA'ed into the qoutfifo.
403	 */
404	ahd_run_qoutfifo(ahd);
405
406	/*
407	 * Flush the good status FIFO for compelted packetized commands.
408	 */
409	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
410	saved_scbptr = ahd_get_scbptr(ahd);
411	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
412		u_int fifo_mode;
413		u_int i;
414
415		scbid = (ahd_inb(ahd, GSFIFO+1) << 8)
416		      | ahd_inb(ahd, GSFIFO);
417		scb = ahd_lookup_scb(ahd, scbid);
418		if (scb == NULL) {
419			printf("%s: Warning - GSFIFO SCB %d invalid\n",
420			       ahd_name(ahd), scbid);
421			continue;
422		}
423		/*
424		 * Determine if this transaction is still active in
425		 * any FIFO.  If it is, we must flush that FIFO to
426		 * the host before completing the  command.
427		 */
428		fifo_mode = 0;
429		for (i = 0; i < 2; i++) {
430			/* Toggle to the other mode. */
431			fifo_mode ^= 1;
432			ahd_set_modes(ahd, fifo_mode, fifo_mode);
433			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
434				continue;
435
436			ahd_run_data_fifo(ahd, scb);
437
438			/*
439			 * Clearing this transaction in this FIFO may
440			 * cause a CFG4DATA for this same transaction
441			 * to assert in the other FIFO.  Make sure we
442			 * loop one more time and check the other FIFO.
443			 */
444			i = 0;
445		}
446		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
447		ahd_set_scbptr(ahd, scbid);
448		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
449		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
450		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
451		      & SG_LIST_NULL) != 0)) {
452			u_int comp_head;
453
454			/*
455			 * The transfer completed with a residual.
456			 * Place this SCB on the complete DMA list
457			 * so that we Update our in-core copy of the
458			 * SCB before completing the command.
459			 */
460			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
461			ahd_outb(ahd, SCB_SGPTR,
462				 ahd_inb_scbram(ahd, SCB_SGPTR)
463				 | SG_STATUS_VALID);
464			ahd_outw(ahd, SCB_TAG, SCB_GET_TAG(scb));
465			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
466			ahd_outw(ahd, SCB_NEXT_COMPLETE, comp_head);
467			if (SCBID_IS_NULL(comp_head))
468				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD,
469					 SCB_GET_TAG(scb));
470		} else
471			ahd_complete_scb(ahd, scb);
472	}
473	ahd_set_scbptr(ahd, saved_scbptr);
474
475	/*
476	 * Setup for command channel portion of flush.
477	 */
478	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
479
480	/*
481	 * Wait for any inprogress DMA to complete and clear DMA state
482	 * if this if for an SCB in the qinfifo.
483	 */
484	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {
485
486		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
487			if ((ccscbctl & ARRDONE) != 0)
488				break;
489		} else if ((ccscbctl & CCSCBDONE) != 0)
490			break;
491		ahd_delay(200);
492	}
493	if ((ccscbctl & CCSCBDIR) != 0)
494		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));
495
496	saved_scbptr = ahd_get_scbptr(ahd);
497	/*
498	 * Manually update/complete any completed SCBs that are waiting to be
499	 * DMA'ed back up to the host.
500	 */
501	scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
502	while (!SCBID_IS_NULL(scbid)) {
503		uint8_t *hscb_ptr;
504		u_int	 i;
505
506		ahd_set_scbptr(ahd, scbid);
507		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
508		scb = ahd_lookup_scb(ahd, scbid);
509		if (scb == NULL) {
510			printf("%s: Warning - DMA-up and complete "
511			       "SCB %d invalid\n", ahd_name(ahd), scbid);
512			continue;
513		}
514		hscb_ptr = (uint8_t *)scb->hscb;
515		for (i = 0; i < sizeof(struct hardware_scb); i++)
516			*hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i);
517
518		ahd_complete_scb(ahd, scb);
519		scbid = next_scbid;
520	}
521	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
522
523	scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD);
524	while (!SCBID_IS_NULL(scbid)) {
525
526		ahd_set_scbptr(ahd, scbid);
527		next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
528		scb = ahd_lookup_scb(ahd, scbid);
529		if (scb == NULL) {
530			printf("%s: Warning - Complete SCB %d invalid\n",
531			       ahd_name(ahd), scbid);
532			continue;
533		}
534
535		ahd_complete_scb(ahd, scb);
536		scbid = next_scbid;
537	}
538	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
539
540	/*
541	 * Restore state.
542	 */
543	ahd_set_scbptr(ahd, saved_scbptr);
544	ahd_restore_modes(ahd, saved_modes);
545	ahd->flags |= AHD_UPDATE_PEND_CMDS;
546}
547
548/*
549 * Determine if an SCB for a packetized transaction
550 * is active in a FIFO.
551 */
552static int
553ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
554{
555
556	/*
557	 * The FIFO is only active for our transaction if
558	 * the SCBPTR matches the SCB's ID and the firmware
559	 * has installed a handler for the FIFO or we have
560	 * a pending SAVEPTRS or CFG4DATA interrupt.
561	 */
562	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
563	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
564	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
565		return (0);
566
567	return (1);
568}
569
570/*
571 * Run a data fifo to completion for a transaction we know
572 * has completed across the SCSI bus (good status has been
573 * received).  We are already set to the correct FIFO mode
574 * on entry to this routine.
575 *
576 * This function attempts to operate exactly as the firmware
577 * would when running this FIFO.  Care must be taken to update
578 * this routine any time the firmware's FIFO algorithm is
579 * changed.
580 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	/*
	 * Service FIFO events just as the sequencer would until the
	 * transfer either reaches the SCSI bus, completes its S/G
	 * list, or the FIFO drains to the host.
	 */
	while (1) {
		seqintsrc = ahd_inb(ahd, SEQINTSRC);
		if ((seqintsrc & CFG4DATA) != 0) {
			uint32_t datacnt;
			uint32_t sgptr;

			/*
			 * Clear full residual flag.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
			ahd_outb(ahd, SCB_SGPTR, sgptr);

			/*
			 * Load datacnt and address.
			 */
			datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
			if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			} else
				ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
			ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
			ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr);
			ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

			/*
			 * Initialize Residual Fields.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

			/*
			 * Mark the SCB as having a FIFO in use.
			 */
			ahd_outb(ahd, SCB_FIFO_USE_COUNT,
				 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

			/*
			 * Install a "fake" handler for this FIFO.
			 */
			ahd_outw(ahd, LONGJMP_ADDR, 0);

			/*
			 * Notify the hardware that we have satisfied
			 * this sequencer interrupt.
			 */
			ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
		} else if ((seqintsrc & SAVEPTRS) != 0) {
			uint32_t sgptr;
			uint32_t resid;

			if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
				/*
				 * Snapshot Save Pointers.  Clear
				 * the snapshot and continue.
				 */
				ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
				continue;
			}

			/*
			 * Disable S/G fetch so the DMA engine
			 * is available to future users.
			 */
			if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
				ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, 0);

			/*
			 * Flush the data FIFO.  Strictly only
			 * necessary for Rev A parts.
			 */
			ahd_outb(ahd, DFCNTRL,
				 ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

			/*
			 * Calculate residual.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			resid = ahd_inl(ahd, SHCNT);
			resid |=
			    ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
				/*
				 * Must back up to the correct S/G element.
				 * Typically this just means resetting our
				 * low byte to the offset in the SG_CACHE,
				 * but if we wrapped, we have to correct
				 * the other bytes of the sgptr too.
				 */
				if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
				 && (sgptr & 0x80) == 0)
					sgptr -= 0x100;
				sgptr &= ~0xFF;
				sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
				       & SG_ADDR_MASK;
				ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
				ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
			} else if ((resid & AHD_SG_LEN_MASK) == 0) {
				ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
					 sgptr | SG_LIST_NULL);
			}
			/*
			 * Save Pointers.
			 */
			ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
			ahd_outl(ahd, SCB_DATACNT, resid);
			ahd_outl(ahd, SCB_SGPTR, sgptr);
			ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
			ahd_outb(ahd, SEQIMODE,
				 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
			/*
			 * If the data is to the SCSI bus, we are
			 * done, otherwise wait for FIFOEMP.
			 */
			if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
				break;
		} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
			uint32_t sgptr;
			uint64_t data_addr;
			uint32_t data_len;
			u_int	 dfcntrl;

			/*
			 * Disable S/G fetch so the DMA engine
			 * is available to future users.
			 */
			if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
				ahd_outb(ahd, CCSGCTL, 0);
				ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
			}

			/*
			 * Wait for the DMA engine to notice that the
			 * host transfer is enabled and that there is
			 * space in the S/G FIFO for new segments before
			 * loading more segments.
			 */
			if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) == 0)
				continue;
			if ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) == 0)
				continue;

			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct	ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				/* High address bits live in sg->len. */
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS)!=0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		} else if ((ahd_inb(ahd, SG_CACHE_SHADOW)
			 & LAST_SEG_DONE) != 0) {

			/*
			 * Transfer completed to the end of SG list
			 * and has flushed to the host.
			 */
			ahd_outb(ahd, SCB_SGPTR,
				 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
			break;
		} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
			break;
		}
		/* Nothing to service yet; poll again shortly. */
		ahd_delay(200);
	}
	/*
	 * Clear any handler for this FIFO, decrement
	 * the FIFO use count for the SCB, and release
	 * the FIFO.
	 */
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SCB_FIFO_USE_COUNT,
		 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
}
810
/*
 * Process all completed commands that the sequencer has posted to the
 * qoutfifo completion ring.
 */
void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct scb *scb;
	u_int  scb_index;

	/* This routine must not be re-entered from completion handlers. */
	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	/*
	 * Consume entries whose valid tag matches the tag expected for
	 * the current pass through the ring.
	 */
	while ((ahd->qoutfifo[ahd->qoutfifonext]
	     & QOUTFIFO_ENTRY_VALID_LE) == ahd->qoutfifonext_valid_tag) {

		scb_index = ahd_le16toh(ahd->qoutfifo[ahd->qoutfifonext]
				      & ~QOUTFIFO_ENTRY_VALID_LE);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printf("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			ahd_dump_card_state(ahd);
		} else
			ahd_complete_scb(ahd, scb);

		/* Advance, wrapping within the power-of-two sized ring. */
		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		/*
		 * On wrap, flip the sense of the valid tag so that stale
		 * entries from the previous pass are not re-consumed.
		 */
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID_LE;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}
842
843/************************* Interrupt Handling *********************************/
void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
	/*
	 * Some catastrophic hardware error has occurred.
	 * Print it for the user and disable the controller.
	 */
	int i;
	int error;

	/* Decode the ERROR register against the known error bits. */
	error = ahd_inb(ahd, ERROR);
	for (i = 0; i < num_errors; i++) {
		if ((error & ahd_hard_errors[i].errno) != 0)
			printf("%s: hwerrint, %s\n",
			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
	}

	ahd_dump_card_state(ahd);
	panic("BRKADRINT");

	/*
	 * NOTE(review): panic() does not return, so the teardown code
	 * below is unreachable as written; it appears to be retained
	 * from a variant where the panic was optional — confirm before
	 * relying on it.
	 */
	/* Tell everyone that this HBA is no longer available */
	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Tell the system that this controller has gone away. */
	ahd_free(ahd);
}
872
873void
874ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
875{
876	u_int seqintcode;
877
878	/*
879	 * Save the sequencer interrupt code and clear the SEQINT
880	 * bit. We will unpause the sequencer, if appropriate,
881	 * after servicing the request.
882	 */
883	seqintcode = ahd_inb(ahd, SEQINTCODE);
884	ahd_outb(ahd, CLRINT, CLRSEQINT);
885	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
886		/*
887		 * Unpause the sequencer and let it clear
888		 * SEQINT by writing NO_SEQINT to it.  This
889		 * will cause the sequencer to be paused again,
890		 * which is the expected state of this routine.
891		 */
892		ahd_unpause(ahd);
893		while (!ahd_is_paused(ahd))
894			;
895		ahd_outb(ahd, CLRINT, CLRSEQINT);
896	}
897	ahd_update_modes(ahd);
898#ifdef AHD_DEBUG
899	if ((ahd_debug & AHD_SHOW_MISC) != 0)
900		printf("%s: Handle Seqint Called for code %d\n",
901		       ahd_name(ahd), seqintcode);
902#endif
903	switch (seqintcode) {
904	case BAD_SCB_STATUS:
905	{
906		struct	scb *scb;
907		u_int	scbid;
908		int	cmds_pending;
909
910		scbid = ahd_get_scbptr(ahd);
911		scb = ahd_lookup_scb(ahd, scbid);
912		if (scb != NULL) {
913			ahd_complete_scb(ahd, scb);
914		} else {
915			printf("%s: WARNING no command for scb %d "
916			       "(bad status)\n", ahd_name(ahd), scbid);
917			ahd_dump_card_state(ahd);
918		}
919		cmds_pending = ahd_inw(ahd, CMDS_PENDING);
920		if (cmds_pending > 0)
921			ahd_outw(ahd, CMDS_PENDING, cmds_pending - 1);
922		break;
923	}
924	case ENTERING_NONPACK:
925	{
926		struct	scb *scb;
927		u_int	scbid;
928
929		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
930				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
931		scbid = ahd_get_scbptr(ahd);
932		scb = ahd_lookup_scb(ahd, scbid);
933		if (scb == NULL) {
934			/*
935			 * Somehow need to know if this
936			 * is from a selection or reselection.
937			 * From that, we can determine target
938			 * ID so we at least have an I_T nexus.
939			 */
940		} else {
941			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
942			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
943			ahd_outb(ahd, SEQ_FLAGS, 0x0);
944		}
945		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
946		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
947			/*
948			 * Phase change after read stream with
949			 * CRC error with P0 asserted on last
950			 * packet.
951			 */
952#ifdef AHD_DEBUG
953			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
954				printf("%s: Assuming LQIPHASE_NLQ with "
955				       "P0 assertion\n", ahd_name(ahd));
956#endif
957		}
958#ifdef AHD_DEBUG
959		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
960			printf("%s: Entering NONPACK\n", ahd_name(ahd));
961#endif
962		break;
963	}
964	case INVALID_SEQINT:
965		printf("%s: Invalid Sequencer interrupt occurred.\n",
966		       ahd_name(ahd));
967		ahd_dump_card_state(ahd);
968		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
969		break;
970	case STATUS_OVERRUN:
971	{
972		struct	scb *scb;
973		u_int	scbid;
974
975		scbid = ahd_get_scbptr(ahd);
976		scb = ahd_lookup_scb(ahd, scbid);
977		if (scb != NULL)
978			ahd_print_path(ahd, scb);
979		else
980			printf("%s: ", ahd_name(ahd));
981		printf("SCB %d Packetized Status Overrun", scbid);
982		ahd_dump_card_state(ahd);
983		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
984		break;
985	}
986	case CFG4ISTAT_INTR:
987	{
988		struct	scb *scb;
989		u_int	scbid;
990
991		scbid = ahd_get_scbptr(ahd);
992		scb = ahd_lookup_scb(ahd, scbid);
993		if (scb == NULL) {
994			ahd_dump_card_state(ahd);
995			printf("CFG4ISTAT: Free SCB %d referenced", scbid);
996			panic("For safety");
997		}
998		ahd_outq(ahd, HADDR, scb->sense_busaddr);
999		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
1000		ahd_outb(ahd, HCNT + 2, 0);
1001		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
1002		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
1003		break;
1004	}
1005	case ILLEGAL_PHASE:
1006	{
1007		u_int bus_phase;
1008
1009		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1010		printf("%s: ILLEGAL_PHASE 0x%x\n",
1011		       ahd_name(ahd), bus_phase);
1012
1013		switch (bus_phase) {
1014		case P_DATAOUT:
1015		case P_DATAIN:
1016		case P_DATAOUT_DT:
1017		case P_DATAIN_DT:
1018		case P_MESGOUT:
1019		case P_STATUS:
1020		case P_MESGIN:
1021			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1022			printf("%s: Issued Bus Reset.\n", ahd_name(ahd));
1023			break;
1024		case P_COMMAND:
1025		{
1026			struct	ahd_devinfo devinfo;
1027			struct	scb *scb;
1028			struct	ahd_initiator_tinfo *targ_info;
1029			struct	ahd_tmode_tstate *tstate;
1030			struct	ahd_transinfo *tinfo;
1031			u_int	scbid;
1032
1033			/*
1034			 * If a target takes us into the command phase
1035			 * assume that it has been externally reset and
1036			 * has thus lost our previous packetized negotiation
1037			 * agreement.  Since we have not sent an identify
1038			 * message and may not have fully qualified the
1039			 * connection, we change our command to TUR, assert
1040			 * ATN and ABORT the task when we go to message in
1041			 * phase.  The OSM will see the REQUEUE_REQUEST
1042			 * status and retry the command.
1043			 */
1044			scbid = ahd_get_scbptr(ahd);
1045			scb = ahd_lookup_scb(ahd, scbid);
1046			if (scb == NULL) {
1047				printf("Invalid phase with no valid SCB.  "
1048				       "Resetting bus.\n");
1049				ahd_reset_channel(ahd, 'A',
1050						  /*Initiate Reset*/TRUE);
1051				break;
1052			}
1053			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
1054					    SCB_GET_TARGET(ahd, scb),
1055					    SCB_GET_LUN(scb),
1056					    SCB_GET_CHANNEL(ahd, scb),
1057					    ROLE_INITIATOR);
1058			targ_info = ahd_fetch_transinfo(ahd,
1059							devinfo.channel,
1060							devinfo.our_scsiid,
1061							devinfo.target,
1062							&tstate);
1063			tinfo = &targ_info->curr;
1064			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1065				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
1066			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
1067					 /*offset*/0, /*ppr_options*/0,
1068					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
1069			ahd_outb(ahd, SCB_CDB_STORE, 0);
1070			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
1071			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
1072			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
1073			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
1074			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
1075			ahd_outb(ahd, SCB_CDB_LEN, 6);
1076			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
1077			scb->hscb->control |= MK_MESSAGE;
1078			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
1079			ahd_outb(ahd, MSG_OUT, HOST_MSG);
1080			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1081			/*
1082			 * The lun is 0, regardless of the SCB's lun
1083			 * as we have not sent an identify message.
1084			 */
1085			ahd_outb(ahd, SAVED_LUN, 0);
1086			ahd_outb(ahd, SEQ_FLAGS, 0);
1087			ahd_assert_atn(ahd);
1088			scb->flags &= ~(SCB_PACKETIZED);
1089			scb->flags |= SCB_ABORT|SCB_CMDPHASE_ABORT;
1090			ahd_freeze_devq(ahd, scb);
1091			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
1092			ahd_freeze_scb(scb);
1093
1094			/*
1095			 * Allow the sequencer to continue with
1096			 * non-pack processing.
1097			 */
1098			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1099			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
1100			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
1101				ahd_outb(ahd, CLRLQOINT1, 0);
1102			}
1103#ifdef AHD_DEBUG
1104			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1105				ahd_print_path(ahd, scb);
1106				printf("Unexpected command phase from "
1107				       "packetized target\n");
1108			}
1109#endif
1110			break;
1111		}
1112		}
1113		break;
1114	}
1115	case CFG4OVERRUN:
1116	{
1117		struct	scb *scb;
1118		u_int	scb_index;
1119
1120#ifdef AHD_DEBUG
1121		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1122			printf("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
1123			       ahd_inb(ahd, MODE_PTR));
1124		}
1125#endif
1126		scb_index = ahd_get_scbptr(ahd);
1127		scb = ahd_lookup_scb(ahd, scb_index);
1128		if (scb == NULL) {
1129			/*
1130			 * Attempt to transfer to an SCB that is
1131			 * not outstanding.
1132			 */
1133			ahd_assert_atn(ahd);
1134			ahd_outb(ahd, MSG_OUT, HOST_MSG);
1135			ahd->msgout_buf[0] = MSG_ABORT_TASK;
1136			ahd->msgout_len = 1;
1137			ahd->msgout_index = 0;
1138			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1139			/*
1140			 * Clear status received flag to prevent any
1141			 * attempt to complete this bogus SCB.
1142			 */
1143			ahd_outb(ahd, SCB_CONTROL,
1144				 ahd_inb_scbram(ahd, SCB_CONTROL)
1145				 & ~STATUS_RCVD);
1146		}
1147		break;
1148	}
1149	case DUMP_CARD_STATE:
1150	{
1151		ahd_dump_card_state(ahd);
1152		break;
1153	}
1154	case PDATA_REINIT:
1155	{
1156#ifdef AHD_DEBUG
1157		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1158			printf("%s: PDATA_REINIT - DFCNTRL = 0x%x "
1159			       "SG_CACHE_SHADOW = 0x%x\n",
1160			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
1161			       ahd_inb(ahd, SG_CACHE_SHADOW));
1162		}
1163#endif
1164		ahd_reinitialize_dataptrs(ahd);
1165		break;
1166	}
1167	case HOST_MSG_LOOP:
1168	{
1169		struct ahd_devinfo devinfo;
1170
1171		/*
1172		 * The sequencer has encountered a message phase
1173		 * that requires host assistance for completion.
1174		 * While handling the message phase(s), we will be
1175		 * notified by the sequencer after each byte is
1176		 * transferred so we can track bus phase changes.
1177		 *
1178		 * If this is the first time we've seen a HOST_MSG_LOOP
1179		 * interrupt, initialize the state of the host message
1180		 * loop.
1181		 */
1182		ahd_fetch_devinfo(ahd, &devinfo);
1183		if (ahd->msg_type == MSG_TYPE_NONE) {
1184			struct scb *scb;
1185			u_int scb_index;
1186			u_int bus_phase;
1187
1188			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
1189			if (bus_phase != P_MESGIN
1190			 && bus_phase != P_MESGOUT) {
1191				printf("ahd_intr: HOST_MSG_LOOP bad "
1192				       "phase 0x%x\n", bus_phase);
1193				/*
1194				 * Probably transitioned to bus free before
1195				 * we got here.  Just punt the message.
1196				 */
1197				ahd_dump_card_state(ahd);
1198				ahd_clear_intstat(ahd);
1199				ahd_restart(ahd);
1200				return;
1201			}
1202
1203			scb_index = ahd_get_scbptr(ahd);
1204			scb = ahd_lookup_scb(ahd, scb_index);
1205			if (devinfo.role == ROLE_INITIATOR) {
1206				if (bus_phase == P_MESGOUT)
1207					ahd_setup_initiator_msgout(ahd,
1208								   &devinfo,
1209								   scb);
1210				else {
1211					ahd->msg_type =
1212					    MSG_TYPE_INITIATOR_MSGIN;
1213					ahd->msgin_index = 0;
1214				}
1215			}
1216#if AHD_TARGET_MODE
1217			else {
1218				if (bus_phase == P_MESGOUT) {
1219					ahd->msg_type =
1220					    MSG_TYPE_TARGET_MSGOUT;
1221					ahd->msgin_index = 0;
1222				}
1223				else
1224					ahd_setup_target_msgin(ahd,
1225							       &devinfo,
1226							       scb);
1227			}
1228#endif
1229		}
1230
1231		ahd_handle_message_phase(ahd);
1232		break;
1233	}
1234	case NO_MATCH:
1235	{
1236		/* Ensure we don't leave the selection hardware on */
1237		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
1238		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
1239
1240		printf("%s:%c:%d: no active SCB for reconnecting "
1241		       "target - issuing BUS DEVICE RESET\n",
1242		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
1243		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
1244		       "REG0 == 0x%x ACCUM = 0x%x\n",
1245		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
1246		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
1247		printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
1248		       "SINDEX == 0x%x\n",
1249		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
1250		       ahd_find_busy_tcl(ahd,
1251					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
1252						   ahd_inb(ahd, SAVED_LUN))),
1253		       ahd_inw(ahd, SINDEX));
1254		printf("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
1255		       "SCB_CONTROL == 0x%x\n",
1256		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
1257		       ahd_inb_scbram(ahd, SCB_LUN),
1258		       ahd_inb_scbram(ahd, SCB_CONTROL));
1259		printf("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
1260		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
1261		printf("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
1262		printf("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
1263		ahd_dump_card_state(ahd);
1264		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
1265		ahd->msgout_len = 1;
1266		ahd->msgout_index = 0;
1267		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
1268		ahd_outb(ahd, MSG_OUT, HOST_MSG);
1269		ahd_assert_atn(ahd);
1270		break;
1271	}
1272	case PROTO_VIOLATION:
1273	{
1274		ahd_handle_proto_violation(ahd);
1275		break;
1276	}
1277	case IGN_WIDE_RES:
1278	{
1279		struct ahd_devinfo devinfo;
1280
1281		ahd_fetch_devinfo(ahd, &devinfo);
1282		ahd_handle_ign_wide_residue(ahd, &devinfo);
1283		break;
1284	}
1285	case BAD_PHASE:
1286	{
1287		u_int lastphase;
1288
1289		lastphase = ahd_inb(ahd, LASTPHASE);
1290		printf("%s:%c:%d: unknown scsi bus phase %x, "
1291		       "lastphase = 0x%x.  Attempting to continue\n",
1292		       ahd_name(ahd), 'A',
1293		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1294		       lastphase, ahd_inb(ahd, SCSISIGI));
1295		break;
1296	}
1297	case MISSED_BUSFREE:
1298	{
1299		u_int lastphase;
1300
1301		lastphase = ahd_inb(ahd, LASTPHASE);
1302		printf("%s:%c:%d: Missed busfree. "
1303		       "Lastphase = 0x%x, Curphase = 0x%x\n",
1304		       ahd_name(ahd), 'A',
1305		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
1306		       lastphase, ahd_inb(ahd, SCSISIGI));
1307		ahd_restart(ahd);
1308		return;
1309	}
1310	case DATA_OVERRUN:
1311	{
1312		/*
1313		 * When the sequencer detects an overrun, it
1314		 * places the controller in "BITBUCKET" mode
1315		 * and allows the target to complete its transfer.
1316		 * Unfortunately, none of the counters get updated
1317		 * when the controller is in this mode, so we have
1318		 * no way of knowing how large the overrun was.
1319		 */
1320		struct	scb *scb;
1321		u_int	scbindex;
1322#ifdef AHD_DEBUG
1323		u_int	lastphase;
1324#endif
1325
1326		scbindex = ahd_get_scbptr(ahd);
1327		scb = ahd_lookup_scb(ahd, scbindex);
1328#ifdef AHD_DEBUG
1329		lastphase = ahd_inb(ahd, LASTPHASE);
1330		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
1331			ahd_print_path(ahd, scb);
1332			printf("data overrun detected %s.  Tag == 0x%x.\n",
1333			       ahd_lookup_phase_entry(lastphase)->phasemsg,
1334			       SCB_GET_TAG(scb));
1335			ahd_print_path(ahd, scb);
1336			printf("%s seen Data Phase.  Length = %ld.  "
1337			       "NumSGs = %d.\n",
1338			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
1339			       ? "Have" : "Haven't",
1340			       ahd_get_transfer_length(scb), scb->sg_count);
1341			ahd_dump_sglist(scb);
1342		}
1343#endif
1344
1345		/*
1346		 * Set this and it will take effect when the
1347		 * target does a command complete.
1348		 */
1349		ahd_freeze_devq(ahd, scb);
1350		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
1351		ahd_freeze_scb(scb);
1352		break;
1353	}
1354	case MKMSG_FAILED:
1355	{
1356		struct ahd_devinfo devinfo;
1357		struct scb *scb;
1358		u_int scbid;
1359
1360		ahd_fetch_devinfo(ahd, &devinfo);
1361		printf("%s:%c:%d:%d: Attempt to issue message failed\n",
1362		       ahd_name(ahd), devinfo.channel, devinfo.target,
1363		       devinfo.lun);
1364		scbid = ahd_get_scbptr(ahd);
1365		scb = ahd_lookup_scb(ahd, scbid);
1366		if (scb != NULL
1367		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
1368			/*
1369			 * Ensure that we didn't put a second instance of this
1370			 * SCB into the QINFIFO.
1371			 */
1372			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1373					   SCB_GET_CHANNEL(ahd, scb),
1374					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1375					   ROLE_INITIATOR, /*status*/0,
1376					   SEARCH_REMOVE);
1377		ahd_outb(ahd, SCB_CONTROL,
1378			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
1379		break;
1380	}
1381	case TASKMGMT_FUNC_COMPLETE:
1382	{
1383		u_int	scbid;
1384		struct	scb *scb;
1385
1386		scbid = ahd_get_scbptr(ahd);
1387		scb = ahd_lookup_scb(ahd, scbid);
1388		if (scb != NULL) {
1389			u_int	   lun;
1390			u_int	   tag;
1391			cam_status error;
1392
1393			ahd_print_path(ahd, scb);
1394			printf("Task Management Func 0x%x Complete\n",
1395			       scb->hscb->task_management);
1396			lun = CAM_LUN_WILDCARD;
1397			tag = SCB_LIST_NULL;
1398
1399			switch (scb->hscb->task_management) {
1400			case SIU_TASKMGMT_ABORT_TASK:
1401				tag = SCB_GET_TAG(scb);
1402			case SIU_TASKMGMT_ABORT_TASK_SET:
1403			case SIU_TASKMGMT_CLEAR_TASK_SET:
1404				lun = scb->hscb->lun;
1405				error = CAM_REQ_ABORTED;
1406				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1407					       'A', lun, tag, ROLE_INITIATOR,
1408					       error);
1409				break;
1410			case SIU_TASKMGMT_LUN_RESET:
1411				lun = scb->hscb->lun;
1412			case SIU_TASKMGMT_TARGET_RESET:
1413			{
1414				struct ahd_devinfo devinfo;
1415
1416				ahd_scb_devinfo(ahd, &devinfo, scb);
1417				error = CAM_BDR_SENT;
1418				ahd_handle_devreset(ahd, &devinfo, lun,
1419						    CAM_BDR_SENT,
1420						    lun != CAM_LUN_WILDCARD
1421						    ? "Lun Reset"
1422						    : "Target Reset",
1423						    /*verbose_level*/0);
1424				break;
1425			}
1426			default:
1427				panic("Unexpected TaskMgmt Func\n");
1428				break;
1429			}
1430		}
1431		break;
1432	}
1433	case TASKMGMT_CMD_CMPLT_OKAY:
1434	{
1435		u_int	scbid;
1436		struct	scb *scb;
1437
1438		/*
1439		 * An ABORT TASK TMF failed to be delivered before
1440		 * the targeted command completed normally.
1441		 */
1442		scbid = ahd_get_scbptr(ahd);
1443		scb = ahd_lookup_scb(ahd, scbid);
1444		if (scb != NULL) {
1445			/*
1446			 * Remove the second instance of this SCB from
1447			 * the QINFIFO if it is still there.
1448			 */
1449			ahd_print_path(ahd, scb);
1450			printf("SCB completes before TMF\n");
1451			/*
1452			 * Handle losing the race.  Wait until any
1453			 * current selection completes.  We will then
1454			 * set the TMF back to zero in this SCB so that
1455			 * the sequencer doesn't bother to issue another
1456			 * sequencer interrupt for its completion.
1457			 */
1458			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
1459			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
1460			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
1461				;
1462			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
1463			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
1464					   SCB_GET_CHANNEL(ahd, scb),
1465					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
1466					   ROLE_INITIATOR, /*status*/0,
1467					   SEARCH_REMOVE);
1468		}
1469		break;
1470	}
1471	case TRACEPOINT0:
1472	case TRACEPOINT1:
1473	case TRACEPOINT2:
1474	case TRACEPOINT3:
1475		printf("%s: Tracepoint %d\n", ahd_name(ahd),
1476		       seqintcode - TRACEPOINT0);
1477		break;
1478	case NO_SEQINT:
1479		break;
1480	case SAW_HWERR:
1481		ahd_handle_hwerrint(ahd);
1482		break;
1483	default:
1484		printf("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
1485		       seqintcode);
1486		break;
1487	}
1488	/*
1489	 *  The sequencer is paused immediately on
1490	 *  a SEQINT, so we should restart it when
1491	 *  we're done.
1492	 */
1493	ahd_unpause(ahd);
1494}
1495
/*
 * Handle a SCSI interrupt (SCSIINT).  Latches the interesting status
 * bits from SSTAT0/SSTAT1/SSTAT3, LQISTAT1, and LQOSTAT0, then decodes
 * them in priority order: transceiver change, offset overrun, external
 * bus reset, parity/CRC errors, selection timeout, selection events,
 * LQI phase errors, and finally busfree processing.  Called with the
 * sequencer paused; each branch is responsible for restarting or
 * unpausing it as appropriate.
 */
void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
	struct scb	*scb;
	u_int		 status0;
	u_int		 status3;
	u_int		 status;
	u_int		 lqistat1;
	u_int		 lqostat0;
	u_int		 scbid;
	u_int		 busfreetime;

	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Snapshot all interrupt sources of interest before acting. */
	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

		/*
		 * Only honor selection status bits that are actually
		 * enabled in SIMODE0 (which lives in the CFG mode page).
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		simode0 = ahd_inb(ahd, SIMODE0);
		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	/*
	 * Disregard the current SCB if the connection has not yet been
	 * identified; SCBPTR may not reference a valid nexus in that case.
	 */
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

	if ((status0 & IOERR) != 0) {
		u_int now_lvd;

		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
		printf("%s: Transceiver State Has Changed to %s mode\n",
		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
		ahd_outb(ahd, CLRSINT0, CLRIOERR);
		/*
		 * A change in I/O mode is equivalent to a bus reset.
		 */
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		ahd_pause(ahd);
		ahd_setup_iocell_workaround(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & OVERRUN) != 0) {
		printf("%s: SCSI offset overrun detected.  Resetting bus.\n",
		       ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {
		/* Reset seen on the bus that we did not initiate. */
		printf("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {
		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {
		printf("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
			/* Work around hardware auto-clear erratum. */
			ahd_outb(ahd, CLRLQOINT1, 0);
		}
	} else if ((status & SELTO) != 0) {
		u_int  scbid1;

		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		/* The timed-out SCB is at the head of the waiting queue. */
		scbid1 = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid1);
		if (scb == NULL) {
			printf("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid1);
			ahd_dump_card_state(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printf("Saw Selection Timeout for SCB 0x%x\n",
				       scbid1);
			}
#endif
			/*
			 * Force a renegotiation with this target just in
			 * case the cable was pulled and will later be
			 * re-attached.  The target may forget its negotiation
			 * settings with us should it attempt to reselect
			 * during the interruption.  The target will not issue
			 * a unit attention in this case, so we must always
			 * renegotiate.
			 */
			ahd_scb_devinfo(ahd, &devinfo, scb);
			ahd_force_renegotiation(ahd, &devinfo);
			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);
		}
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & (SELDI|SELDO)) != 0) {
		/* Selection completed (in or out); apply first-selection
		 * iocell workaround and let the sequencer continue. */
		ahd_iocell_first_selection(ahd);
		ahd_unpause(ahd);
	} else if (status3 != 0) {
		printf("%s: SCSI Cell parity error SSTAT3 == 0x%x\n",
		       ahd_name(ahd), status3);
		ahd_outb(ahd, CLRSINT3, status3);
	} else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) {
		ahd_handle_lqiphase_error(ahd, lqistat1);
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * This status can be delayed during some
		 * streaming operations.  The SCSIPHASE
		 * handler has already dealt with this case
		 * so just clear the error.
		 */
		ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
	} else if ((status & BUSFREE) != 0) {
		u_int lqostat1;
		int   restart;
		int   clear_fifo;
		int   packetized;
		u_int mode;

		/*
		 * Clear our selection hardware as soon as possible.
		 * We may have an entry in the waiting Q for this target,
		 * that is affected by this busfree and we don't want to
		 * go about selecting the target while we handle the event.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);

		/*
		 * Determine what we were up to at the time of
		 * the busfree.
		 */
		mode = AHD_MODE_SCSI;
		busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;
		lqostat1 = ahd_inb(ahd, LQOSTAT1);
		switch (busfreetime) {
		case BUSFREE_DFF0:
		case BUSFREE_DFF1:
		{
			u_int	scbid1;
			struct	scb *scb1;

			/*
			 * Busfree while a data FIFO was active; switch to
			 * that FIFO's mode to inspect the SCB it references.
			 */
			mode = busfreetime == BUSFREE_DFF0
			     ? AHD_MODE_DFF0 : AHD_MODE_DFF1;
			ahd_set_modes(ahd, mode, mode);
			scbid1 = ahd_get_scbptr(ahd);
			scb1 = ahd_lookup_scb(ahd, scbid1);
			if (scb1 == NULL) {
				printf("%s: Invalid SCB %d in DFF%d "
				       "during unexpected busfree\n",
				       ahd_name(ahd), scbid1, mode);
				packetized = 0;
			} else
				packetized =
				    (scb1->flags & SCB_PACKETIZED) != 0;
			clear_fifo = 1;
			break;
		}
		case BUSFREE_LQO:
			/* Busfree during LQ-out; always packetized. */
			clear_fifo = 0;
			packetized = 1;
			break;
		default:
			clear_fifo = 0;
			packetized =  (lqostat1 & LQOBUSFREE) != 0;
			/*
			 * A P_BUSFREE LASTPHASE also indicates the busfree
			 * was seen while in a packetized connection.
			 */
			if (!packetized
			 && ahd_inb(ahd, LASTPHASE) == P_BUSFREE)
				packetized = 1;
			break;
		}

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("Saw Busfree.  Busfreetime = 0x%x.\n",
			       busfreetime);
#endif
		/*
		 * Busfrees that occur in non-packetized phases are
		 * handled by the nonpkt_busfree handler.
		 */
		if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) {
			restart = ahd_handle_pkt_busfree(ahd, busfreetime);
		} else {
			packetized = 0;
			restart = ahd_handle_nonpkt_busfree(ahd);
		}
		/*
		 * Clear the busfree interrupt status.  The setting of
		 * the interrupt is a pulse, so in a perfect world, we
		 * would not need to muck with the ENBUSFREE logic.  This
		 * would ensure that if the bus moves on to another
		 * connection, busfree protection is still in force.  If
		 * BUSFREEREV is broken, however, we must manually clear
		 * the ENBUSFREE if the busfree occurred during a non-pack
		 * connection so that we don't get false positives during
		 * future, packetized, connections.
		 */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		if (packetized == 0
		 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0)
			ahd_outb(ahd, SIMODE1,
				 ahd_inb(ahd, SIMODE1) & ~ENBUSFREE);

		if (clear_fifo)
			ahd_clear_fifo(ahd, mode);

		ahd_clear_msg_state(ahd);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		if (restart) {
			ahd_restart(ahd);
		} else {
			ahd_unpause(ahd);
		}
	} else {
		printf("%s: Missing case in ahd_handle_scsiint. status = %x\n",
		       ahd_name(ahd), status);
		ahd_dump_card_state(ahd);
		ahd_clear_intstat(ahd);
		ahd_unpause(ahd);
	}
}
1743
/*
 * Recover from a transmission error (parity error or CRC error in an
 * LQ or non-LQ packet) flagged via SCSIPERR/LQISTAT1.  Identifies the
 * affected SCB when possible, prints diagnostics unless the SCB is
 * marked silent, and then either retries via LQIRETRY, resets the bus,
 * or queues an appropriate error message (MSG_INITIATOR_DET_ERR or
 * MSG_PARITY_ERROR) for delivery during message out phase.
 * Called with the sequencer paused; unpauses it unless the bus was reset.
 */
static void
ahd_handle_transmission_error(struct ahd_softc *ahd)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	lqistat1;
	u_int	lqistat2;
	u_int	msg_out;
	u_int	curphase;
	u_int	lastphase;
	u_int	perrdiag;
	u_int	cur_col;
	int	silent;

	scb = NULL;
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* Phase errors are handled elsewhere; mask them out here. */
	lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ);
	lqistat2 = ahd_inb(ahd, LQISTAT2);
	if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0
	 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) {
		u_int lqistate;

		/*
		 * On chips with the delayed NLQ CRC bug, the CRC status
		 * may not be latched yet.  Infer it from the LQI manager's
		 * state machine instead (LQISTATE lives in the CFG page).
		 * NOTE(review): the 0x1E-0x24 and 0x29 state values come
		 * from the hardware state machine encoding — confirm
		 * against the chip documentation before altering.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		lqistate = ahd_inb(ahd, LQISTATE);
		if ((lqistate >= 0x1E && lqistate <= 0x24)
		 || (lqistate == 0x29)) {
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				printf("%s: NLQCRC found via LQISTATE\n",
				       ahd_name(ahd));
			}
#endif
			lqistat1 |= LQICRCI_NLQ;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}

	/* Ack the error sources and gather phase/diagnostic context. */
	ahd_outb(ahd, CLRLQIINT1, lqistat1);
	lastphase = ahd_inb(ahd, LASTPHASE);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	perrdiag = ahd_inb(ahd, PERRDIAG);
	msg_out = MSG_INITIATOR_DET_ERR;
	ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);

	/*
	 * Try to find the SCB associated with this error.
	 */
	silent = FALSE;
	if (lqistat1 == 0
	 || (lqistat1 & LQICRCI_NLQ) != 0) {
		if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
			ahd_set_active_fifo(ahd);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		/* Suppress diagnostics for SCBs flagged as silent. */
		if (scb != NULL && SCB_IS_SILENT(scb))
			silent = TRUE;
	}

	cur_col = 0;
	if (silent == FALSE) {
		printf("%s: Transmission error detected\n", ahd_name(ahd));
		ahd_lqistat1_print(lqistat1, &cur_col, 50);
		ahd_lastphase_print(lastphase, &cur_col, 50);
		ahd_scsisigi_print(curphase, &cur_col, 50);
		ahd_perrdiag_print(perrdiag, &cur_col, 50);
		printf("\n");
		ahd_dump_card_state(ahd);
	}

	if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) {
		/* Overrun during an incoming packet; unrecoverable. */
		if (silent == FALSE) {
			printf("%s: Gross protocol error during incoming "
			       "packet.  lqistat1 == 0x%x.  Resetting bus.\n",
			       ahd_name(ahd), lqistat1);
		}
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((lqistat1 & LQICRCI_LQ) != 0) {
		/*
		 * A CRC error has been detected on an incoming LQ.
		 * The bus is currently hung on the last ACK.
		 * Hit LQIRETRY to release the last ack, and
		 * wait for the sequencer to determine that ATNO
		 * is asserted while in message out to take us
		 * to our host message loop.  No NONPACKREQ or
		 * LQIPHASE type errors will occur in this
		 * scenario.  After this first LQIRETRY, the LQI
		 * manager will be in ISELO where it will
		 * happily sit until another packet phase begins.
		 * Unexpected bus free detection is enabled
		 * through any phases that occur after we release
		 * this last ack until the LQI manager sees a
		 * packet phase.  This implies we may have to
		 * ignore a perfectly valid "unexpected busfree"
		 * after our "initiator detected error" message is
		 * sent.  A busfree is the expected response after
		 * we tell the target that its L_Q was corrupted.
		 * (SPI4R09 10.7.3.3.3)
		 */
		ahd_outb(ahd, LQCTL2, LQIRETRY);
		printf("LQIRetry for LQICRCI_LQ to release ACK\n");
	} else if ((lqistat1 & LQICRCI_NLQ) != 0) {
		/*
		 * We detected a CRC error in a NON-LQ packet.
		 * The hardware has varying behavior in this situation
		 * depending on whether this packet was part of a
		 * stream or not.
		 *
		 * PKT by PKT mode:
		 * The hardware has already acked the complete packet.
		 * If the target honors our outstanding ATN condition,
		 * we should be (or soon will be) in MSGOUT phase.
		 * This will trigger the LQIPHASE_LQ status bit as the
		 * hardware was expecting another LQ.  Unexpected
		 * busfree detection is enabled.  Once LQIPHASE_LQ is
		 * true (first entry into host message loop is much
		 * the same), we must clear LQIPHASE_LQ and hit
		 * LQIRETRY so the hardware is ready to handle
		 * a future LQ.  NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed.  The target may either go busfree
		 * or start another packet in response to our message.
		 *
		 * Read Streaming P0 asserted:
		 * If we raise ATN and the target completes the entire
		 * stream (P0 asserted during the last packet), the
		 * hardware will ack all data and return to the ISTART
		 * state.  When the target responds to our ATN condition,
		 * LQIPHASE_LQ will be asserted.  We should respond to
		 * this with an LQIRETRY to prepare for any future
		 * packets.  NONPACKREQ will not be asserted again
		 * once we hit LQIRETRY until another packet is
		 * processed.  The target may either go busfree or
		 * start another packet in response to our message.
		 * Busfree detection is enabled.
		 *
		 * Read Streaming P0 not asserted:
		 * If we raise ATN and the target transitions to
		 * MSGOUT in or after a packet where P0 is not
		 * asserted, the hardware will assert LQIPHASE_NLQ.
		 * We should respond to the LQIPHASE_NLQ with an
		 * LQIRETRY.  Should the target stay in a non-pkt
		 * phase after we send our message, the hardware
		 * will assert LQIPHASE_LQ.  Recovery is then just as
		 * listed above for the read streaming with P0 asserted.
		 * Busfree detection is enabled.
		 */
		if (silent == FALSE)
			printf("LQICRC_NLQ\n");
		if (scb == NULL) {
			printf("%s: No SCB valid for LQICRC_NLQ.  "
			       "Resetting bus\n", ahd_name(ahd));
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			return;
		}
	} else if ((lqistat1 & LQIBADLQI) != 0) {
		printf("Need to handle BADLQI!\n");
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		return;
	} else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) {
		/* Parity error in the current (non-packetized) phase. */
		if ((curphase & ~P_DATAIN_DT) != 0) {
			/* Ack the byte.  So we can continue. */
			if (silent == FALSE)
				printf("Acking %s to clear perror\n",
				    ahd_lookup_phase_entry(curphase)->phasemsg);
			ahd_inb(ahd, SCSIDAT);
		}

		if (curphase == P_MESGIN)
			msg_out = MSG_PARITY_ERROR;
	}

	/*
	 * We've set the hardware to assert ATN if we
	 * get a parity error on "in" phases, so all we
	 * need to do is stuff the message buffer with
	 * the appropriate message.  "In" phases have set
	 * mesg_out to something other than MSG_NOP.
	 */
	ahd->send_msg_perror = msg_out;
	if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
		scb->flags |= SCB_TRANSMISSION_ERROR;
	ahd_outb(ahd, MSG_OUT, HOST_MSG);
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_unpause(ahd);
}
1930
1931static void
1932ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1)
1933{
1934	/*
1935	 * Clear the sources of the interrupts.
1936	 */
1937	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1938	ahd_outb(ahd, CLRLQIINT1, lqistat1);
1939
1940	/*
1941	 * If the "illegal" phase changes were in response
1942	 * to our ATN to flag a CRC error, AND we ended up
1943	 * on packet boundaries, clear the error, restart the
1944	 * LQI manager as appropriate, and go on our merry
1945	 * way toward sending the message.  Otherwise, reset
1946	 * the bus to clear the error.
1947	 */
1948	ahd_set_active_fifo(ahd);
1949	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0
1950	 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) {
1951		if ((lqistat1 & LQIPHASE_LQ) != 0) {
1952			printf("LQIRETRY for LQIPHASE_LQ\n");
1953			ahd_outb(ahd, LQCTL2, LQIRETRY);
1954		} else if ((lqistat1 & LQIPHASE_NLQ) != 0) {
1955			printf("LQIRETRY for LQIPHASE_NLQ\n");
1956			ahd_outb(ahd, LQCTL2, LQIRETRY);
1957		} else
1958			panic("ahd_handle_lqiphase_error: No phase errors\n");
1959		ahd_dump_card_state(ahd);
1960		ahd_outb(ahd, CLRINT, CLRSCSIINT);
1961		ahd_unpause(ahd);
1962	} else {
1963		printf("Reseting Channel for LQI Phase error\n");
1964		ahd_dump_card_state(ahd);
1965		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
1966	}
1967}
1968
1969/*
1970 * Packetized unexpected or expected busfree.
1971 * Entered in mode based on busfreetime.
1972 */
1973static int
1974ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
1975{
1976	u_int lqostat1;
1977
1978	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
1979			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
1980	lqostat1 = ahd_inb(ahd, LQOSTAT1);
1981	if ((lqostat1 & LQOBUSFREE) != 0) {
1982		struct scb *scb;
1983		u_int scbid;
1984		u_int saved_scbptr;
1985		u_int waiting_h;
1986		u_int waiting_t;
1987		u_int next;
1988
1989		if ((busfreetime & BUSFREE_LQO) == 0)
1990			printf("%s: Warning, BUSFREE time is 0x%x.  "
1991			       "Expected BUSFREE_LQO.\n",
1992			       ahd_name(ahd), busfreetime);
1993		/*
1994		 * The LQO manager detected an unexpected busfree
1995		 * either:
1996		 *
1997		 * 1) During an outgoing LQ.
1998		 * 2) After an outgoing LQ but before the first
1999		 *    REQ of the command packet.
2000		 * 3) During an outgoing command packet.
2001		 *
2002		 * In all cases, CURRSCB is pointing to the
2003		 * SCB that encountered the failure.  Clean
2004		 * up the queue, clear SELDO and LQOBUSFREE,
2005		 * and allow the sequencer to restart the select
2006		 * out at its lesure.
2007		 */
2008		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
2009		scbid = ahd_inw(ahd, CURRSCB);
2010		scb = ahd_lookup_scb(ahd, scbid);
2011		if (scb == NULL)
2012			panic("SCB not valid during LQOBUSFREE");
2013		/*
2014		 * Clear the status.
2015		 */
2016		ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE);
2017		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
2018			ahd_outb(ahd, CLRLQOINT1, 0);
2019		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
2020		ahd_flush_device_writes(ahd);
2021		ahd_outb(ahd, CLRSINT0, CLRSELDO);
2022
2023		/*
2024		 * Return the LQO manager to its idle loop.  It will
2025		 * not do this automatically if the busfree occurs
2026		 * after the first REQ of either the LQ or command
2027		 * packet or between the LQ and command packet.
2028		 */
2029		ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE);
2030
2031		/*
2032		 * Update the waiting for selection queue so
2033		 * we restart on the correct SCB.
2034		 */
2035		waiting_h = ahd_inw(ahd, WAITING_TID_HEAD);
2036		saved_scbptr = ahd_get_scbptr(ahd);
2037		if (waiting_h != scbid) {
2038
2039			ahd_outw(ahd, WAITING_TID_HEAD, scbid);
2040			waiting_t = ahd_inw(ahd, WAITING_TID_TAIL);
2041			if (waiting_t == waiting_h) {
2042				ahd_outw(ahd, WAITING_TID_TAIL, scbid);
2043				next = SCB_LIST_NULL;
2044			} else {
2045				ahd_set_scbptr(ahd, waiting_h);
2046				next = ahd_inw_scbram(ahd, SCB_NEXT2);
2047			}
2048			ahd_set_scbptr(ahd, scbid);
2049			ahd_outw(ahd, SCB_NEXT2, next);
2050		}
2051		ahd_set_scbptr(ahd, saved_scbptr);
2052		if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2053			if (SCB_IS_SILENT(scb) == FALSE) {
2054				ahd_print_path(ahd, scb);
2055				printf("Probable outgoing LQ CRC error.  "
2056				       "Retrying command\n");
2057			}
2058			scb->crc_retry_count++;
2059		} else {
2060			ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
2061			ahd_freeze_scb(scb);
2062			ahd_freeze_devq(ahd, scb);
2063		}
2064		/* Return unpausing the sequencer. */
2065		return (0);
2066	} else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) {
2067		/*
2068		 * Ignore what are really parity errors that
2069		 * occur on the last REQ of a free running
2070		 * clock prior to going busfree.  Some drives
2071		 * do not properly active negate just before
2072		 * going busfree resulting in a parity glitch.
2073		 */
2074		ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE);
2075#ifdef AHD_DEBUG
2076		if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0)
2077			printf("%s: Parity on last REQ detected "
2078			       "during busfree phase.\n",
2079			       ahd_name(ahd));
2080#endif
2081		/* Return unpausing the sequencer. */
2082		return (0);
2083	}
2084	if (ahd->src_mode != AHD_MODE_SCSI) {
2085		u_int	scbid;
2086		struct	scb *scb;
2087
2088		scbid = ahd_get_scbptr(ahd);
2089		scb = ahd_lookup_scb(ahd, scbid);
2090		ahd_print_path(ahd, scb);
2091		printf("Unexpected PKT busfree condition\n");
2092		ahd_dump_card_state(ahd);
2093		ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2094			       SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2095			       ROLE_INITIATOR, CAM_UNEXP_BUSFREE);
2096
2097		/* Return restarting the sequencer. */
2098		return (1);
2099	}
2100	printf("%s: Unexpected PKT busfree condition\n", ahd_name(ahd));
2101	ahd_dump_card_state(ahd);
2102	/* Restart the sequencer. */
2103	return (1);
2104}
2105
2106/*
2107 * Non-packetized unexpected or expected busfree.
2108 */
2109static int
2110ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
2111{
2112	struct	ahd_devinfo devinfo;
2113	struct	scb *scb;
2114	u_int	lastphase;
2115	u_int	saved_scsiid;
2116	u_int	saved_lun;
2117	u_int	target;
2118	u_int	initiator_role_id;
2119	u_int	scbid;
2120	u_int	ppr_busfree;
2121	int	printerror;
2122
2123	/*
2124	 * Look at what phase we were last in.  If its message out,
2125	 * chances are pretty good that the busfree was in response
2126	 * to one of our abort requests.
2127	 */
2128	lastphase = ahd_inb(ahd, LASTPHASE);
2129	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
2130	saved_lun = ahd_inb(ahd, SAVED_LUN);
2131	target = SCSIID_TARGET(ahd, saved_scsiid);
2132	initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
2133	ahd_compile_devinfo(&devinfo, initiator_role_id,
2134			    target, saved_lun, 'A', ROLE_INITIATOR);
2135	printerror = 1;
2136
2137	scbid = ahd_get_scbptr(ahd);
2138	scb = ahd_lookup_scb(ahd, scbid);
2139	if (scb != NULL
2140	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
2141		scb = NULL;
2142
2143	ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0;
2144	if (lastphase == P_MESGOUT) {
2145		u_int tag;
2146
2147		tag = SCB_LIST_NULL;
2148		if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
2149		 || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
2150			int found;
2151			int sent_msg;
2152
2153			if (scb == NULL) {
2154				ahd_print_devinfo(ahd, &devinfo);
2155				printf("Abort for unidentified "
2156				       "connection completed.\n");
2157				/* restart the sequencer. */
2158				return (1);
2159			}
2160			sent_msg = ahd->msgout_buf[ahd->msgout_index - 1];
2161			ahd_print_path(ahd, scb);
2162			printf("SCB %d - Abort%s Completed.\n",
2163			       SCB_GET_TAG(scb),
2164			       sent_msg == MSG_ABORT_TAG ? "" : " Tag");
2165
2166			if (sent_msg == MSG_ABORT_TAG)
2167				tag = SCB_GET_TAG(scb);
2168
2169			if ((scb->flags & SCB_CMDPHASE_ABORT) != 0) {
2170				/*
2171				 * This abort is in response to an
2172				 * unexpected switch to command phase
2173				 * for a packetized connection.  Since
2174				 * the identify message was never sent,
2175				 * "saved lun" is 0.  We really want to
2176				 * abort only the SCB that encountered
2177				 * this error, which could have a different
2178				 * lun.  The SCB will be retried so the OS
2179				 * will see the UA after renegotiating to
2180				 * packetized.
2181				 */
2182				tag = SCB_GET_TAG(scb);
2183				saved_lun = scb->hscb->lun;
2184			}
2185			found = ahd_abort_scbs(ahd, target, 'A', saved_lun,
2186					       tag, ROLE_INITIATOR,
2187					       CAM_REQ_ABORTED);
2188			printf("found == 0x%x\n", found);
2189			printerror = 0;
2190		} else if (ahd_sent_msg(ahd, AHDMSG_1B,
2191					MSG_BUS_DEV_RESET, TRUE)) {
2192#ifdef __FreeBSD__
2193			/*
2194			 * Don't mark the user's request for this BDR
2195			 * as completing with CAM_BDR_SENT.  CAM3
2196			 * specifies CAM_REQ_CMP.
2197			 */
2198			if (scb != NULL
2199			 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
2200			 && ahd_match_scb(ahd, scb, target, 'A',
2201					  CAM_LUN_WILDCARD, SCB_LIST_NULL,
2202					  ROLE_INITIATOR))
2203				ahd_set_transaction_status(scb, CAM_REQ_CMP);
2204#endif
2205			ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
2206					    CAM_BDR_SENT, "Bus Device Reset",
2207					    /*verbose_level*/0);
2208			printerror = 0;
2209		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
2210			&& ppr_busfree == 0) {
2211			struct ahd_initiator_tinfo *tinfo;
2212			struct ahd_tmode_tstate *tstate;
2213
2214			/*
2215			 * PPR Rejected.  Try non-ppr negotiation
2216			 * and retry command.
2217			 */
2218#ifdef AHD_DEBUG
2219			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2220				printf("PPR negotiation rejected busfree.\n");
2221#endif
2222			tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
2223						    devinfo.our_scsiid,
2224						    devinfo.target, &tstate);
2225			tinfo->curr.transport_version = 2;
2226			tinfo->goal.transport_version = 2;
2227			tinfo->goal.ppr_options = 0;
2228			ahd_qinfifo_requeue_tail(ahd, scb);
2229			printerror = 0;
2230		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
2231			&& ppr_busfree == 0) {
2232			/*
2233			 * Negotiation Rejected.  Go-narrow and
2234			 * retry command.
2235			 */
2236#ifdef AHD_DEBUG
2237			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2238				printf("WDTR Negotiation rejected busfree.\n");
2239#endif
2240			ahd_set_width(ahd, &devinfo,
2241				      MSG_EXT_WDTR_BUS_8_BIT,
2242				      AHD_TRANS_CUR|AHD_TRANS_GOAL,
2243				      /*paused*/TRUE);
2244			ahd_qinfifo_requeue_tail(ahd, scb);
2245			printerror = 0;
2246		} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
2247			&& ppr_busfree == 0) {
2248			/*
2249			 * Negotiation Rejected.  Go-async and
2250			 * retry command.
2251			 */
2252#ifdef AHD_DEBUG
2253			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2254				printf("SDTR negotiation rejected busfree.\n");
2255#endif
2256			ahd_set_syncrate(ahd, &devinfo,
2257					/*period*/0, /*offset*/0,
2258					/*ppr_options*/0,
2259					AHD_TRANS_CUR|AHD_TRANS_GOAL,
2260					/*paused*/TRUE);
2261			ahd_qinfifo_requeue_tail(ahd, scb);
2262			printerror = 0;
2263		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
2264			&& ahd_sent_msg(ahd, AHDMSG_1B,
2265					 MSG_INITIATOR_DET_ERR, TRUE)) {
2266
2267#ifdef AHD_DEBUG
2268			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2269				printf("Expected IDE Busfree\n");
2270#endif
2271			printerror = 0;
2272		} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
2273			&& ahd_sent_msg(ahd, AHDMSG_1B,
2274					MSG_MESSAGE_REJECT, TRUE)) {
2275
2276#ifdef AHD_DEBUG
2277			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2278				printf("Expected QAS Reject Busfree\n");
2279#endif
2280			printerror = 0;
2281		}
2282	}
2283
2284	/*
2285	 * The busfree required flag is honored at the end of
2286	 * the message phases.  We check it last in case we
2287	 * had to send some other message that caused a busfree.
2288	 */
2289	if (printerror != 0
2290	 && (lastphase == P_MESGIN || lastphase == P_MESGOUT)
2291	 && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
2292
2293		ahd_freeze_devq(ahd, scb);
2294		ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
2295		ahd_freeze_scb(scb);
2296		if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) {
2297			ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2298				       SCB_GET_CHANNEL(ahd, scb),
2299				       SCB_GET_LUN(scb), SCB_LIST_NULL,
2300				       ROLE_INITIATOR, CAM_REQ_ABORTED);
2301		} else {
2302#ifdef AHD_DEBUG
2303			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2304				printf("PPR Negotiation Busfree.\n");
2305#endif
2306			ahd_done(ahd, scb);
2307		}
2308		printerror = 0;
2309	}
2310	if (printerror != 0) {
2311		int aborted;
2312
2313		aborted = 0;
2314		if (scb != NULL) {
2315			u_int tag;
2316
2317			if ((scb->hscb->control & TAG_ENB) != 0)
2318				tag = SCB_GET_TAG(scb);
2319			else
2320				tag = SCB_LIST_NULL;
2321			ahd_print_path(ahd, scb);
2322			aborted = ahd_abort_scbs(ahd, target, 'A',
2323				       SCB_GET_LUN(scb), tag,
2324				       ROLE_INITIATOR,
2325				       CAM_UNEXP_BUSFREE);
2326		} else {
2327			/*
2328			 * We had not fully identified this connection,
2329			 * so we cannot abort anything.
2330			 */
2331			printf("%s: ", ahd_name(ahd));
2332		}
2333		if (lastphase != P_BUSFREE)
2334			ahd_force_renegotiation(ahd, &devinfo);
2335		printf("Unexpected busfree %s, %d SCBs aborted, "
2336		       "PRGMCNT == 0x%x\n",
2337		       ahd_lookup_phase_entry(lastphase)->phasemsg,
2338		       aborted,
2339		       ahd_inb(ahd, PRGMCNT)
2340			| (ahd_inb(ahd, PRGMCNT+1) << 8));
2341		ahd_dump_card_state(ahd);
2342	}
2343	/* Always restart the sequencer. */
2344	return (1);
2345}
2346
/*
 * Recover from a SCSI protocol violation reported by the sequencer.
 * Depending on how far the connection progressed, we either reset the
 * bus (no usable SCB, or the target jumped straight to a data phase /
 * ignored our ATN) or attempt to abort the offending command by
 * asserting ATN and queuing an abort message.
 */
static void
ahd_handle_proto_violation(struct ahd_softc *ahd)
{
	struct	ahd_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahd_fetch_devinfo(ahd, &devinfo);
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	seq_flags = ahd_inb(ahd, SEQ_FLAGS);
	curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
	lastphase = ahd_inb(ahd, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction.  Print an error and reset the bus.
		 */
		ahd_print_devinfo(ahd, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahd_print_path(ahd, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahd_inb_scbram(ahd, SCB_CONTROL)
			  & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command.  Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it.  Assert ATN and prepare to send an abort
			 * message.
			 */
			ahd_print_path(ahd, scb);
			printf("Completed command without status.\n");
		} else {
			ahd_print_path(ahd, scb);
			printf("Unknown protocol violation.\n");
			ahd_dump_card_state(ahd);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahd_reset_channel(ahd, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahd_name(ahd), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahd_outb(ahd, SCSISEQ0,
			 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
		ahd_assert_atn(ahd);
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to flag; build the abort message by hand. */
			ahd_print_devinfo(ahd, &devinfo);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			ahd_print_path(ahd, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s.  Attempting to abort.\n",
		       ahd_lookup_phase_entry(curphase)->phasemsg);
	}
}
2441
2442/*
2443 * Force renegotiation to occur the next time we initiate
2444 * a command to the current device.
2445 */
2446static void
2447ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2448{
2449	struct	ahd_initiator_tinfo *targ_info;
2450	struct	ahd_tmode_tstate *tstate;
2451
2452#ifdef AHD_DEBUG
2453	if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
2454		ahd_print_devinfo(ahd, devinfo);
2455		printf("Forcing renegotiation\n");
2456	}
2457#endif
2458	targ_info = ahd_fetch_transinfo(ahd,
2459					devinfo->channel,
2460					devinfo->our_scsiid,
2461					devinfo->target,
2462					&tstate);
2463	ahd_update_neg_request(ahd, devinfo, tstate,
2464			       targ_info, AHD_NEG_IF_NON_ASYNC);
2465}
2466
#define AHD_MAX_STEPS 2000
/*
 * If the sequencer is paused inside one of the firmware's critical
 * sections, single-step it until it exits the section so that host
 * modifications to shared state are safe.  While stepping, all
 * interrupt sources except busfree detection are masked and then
 * restored afterward.  Panics if the sequencer fails to leave the
 * section within AHD_MAX_STEPS instructions.
 */
void
ahd_clear_critical_section(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	int		stepping;
	int		steps;
	int		first_instr;
	u_int		simode0;
	u_int		simode1;
	u_int		simode3;
	u_int		lqimode0;
	u_int		lqimode1;
	u_int		lqomode0;
	u_int		lqomode1;

	if (ahd->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	first_instr = 0;
	simode0 = 0;
	simode1 = 0;
	simode3 = 0;
	lqimode0 = 0;
	lqimode1 = 0;
	lqomode0 = 0;
	lqomode1 = 0;
	saved_modes = ahd_save_modes(ahd);
	for (;;) {
		struct	cs *cs;
		u_int	seqaddr;
		u_int	i;

		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		seqaddr = ahd_inb(ahd, CURADDR)
			| (ahd_inb(ahd, CURADDR+1) << 8);

		/*
		 * Find the critical section, if any, containing the
		 * current sequencer program address.
		 */
		cs = ahd->critical_sections;
		for (i = 0; i < ahd->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahd->num_critical_sections)
			break;

		if (steps > AHD_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n"
			       "%s: First Instruction 0x%x now 0x%x\n",
			       ahd_name(ahd), ahd_name(ahd), first_instr,
			       seqaddr);
			ahd_dump_card_state(ahd);
			panic("critical section loop");
		}

		steps++;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: Single stepping at 0x%x\n", ahd_name(ahd),
			       seqaddr);
#endif
		if (stepping == FALSE) {

			/*
			 * First step: save and mask all interrupt
			 * sources, then enable single-step mode.
			 */
			first_instr = seqaddr;
			ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
			simode0 = ahd_inb(ahd, SIMODE0);
			simode3 = ahd_inb(ahd, SIMODE3);
			lqimode0 = ahd_inb(ahd, LQIMODE0);
			lqimode1 = ahd_inb(ahd, LQIMODE1);
			lqomode0 = ahd_inb(ahd, LQOMODE0);
			lqomode1 = ahd_inb(ahd, LQOMODE1);
			ahd_outb(ahd, SIMODE0, 0);
			ahd_outb(ahd, SIMODE3, 0);
			ahd_outb(ahd, LQIMODE0, 0);
			ahd_outb(ahd, LQIMODE1, 0);
			ahd_outb(ahd, LQOMODE0, 0);
			ahd_outb(ahd, LQOMODE1, 0);
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			simode1 = ahd_inb(ahd, SIMODE1);
			/*
			 * We don't clear ENBUSFREE.  Unfortunately
			 * we cannot re-enable busfree detection within
			 * the current connection, so we must leave it
			 * on while single stepping.
			 */
			ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE);
			ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP);
			stepping = TRUE;
		}
		/* Execute exactly one instruction, then wait for pause. */
		ahd_outb(ahd, CLRSINT1, CLRBUSFREE);
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
		ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
		ahd_outb(ahd, HCNTRL, ahd->unpause);
		while (!ahd_is_paused(ahd))
			ahd_delay(200);
		ahd_update_modes(ahd);
	}
	if (stepping) {
		/* Restore interrupt masks and leave single-step mode. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		ahd_outb(ahd, SIMODE0, simode0);
		ahd_outb(ahd, SIMODE3, simode3);
		ahd_outb(ahd, LQIMODE0, lqimode0);
		ahd_outb(ahd, LQIMODE1, lqimode1);
		ahd_outb(ahd, LQOMODE0, lqomode0);
		ahd_outb(ahd, LQOMODE1, lqomode1);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
		ahd_outb(ahd, SIMODE1, simode1);
		/*
		 * SCSIINT seems to glitch occasionally when
		 * the interrupt masks are restored.  Clear SCSIINT
		 * one more time so that only persistent errors
		 * are seen as a real interrupt.
		 */
		ahd_outb(ahd, CLRINT, CLRSCSIINT);
	}
	ahd_restore_modes(ahd, saved_modes);
}
2588
2589/*
2590 * Clear any pending interrupt status.
2591 */
2592void
2593ahd_clear_intstat(struct ahd_softc *ahd)
2594{
2595	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
2596			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
2597	/* Clear any interrupt conditions this may have caused */
2598	ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2
2599				 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD);
2600	ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT
2601				 |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI
2602				 |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ);
2603	ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ
2604				 |CLRLQOATNPKT|CLRLQOTCRC);
2605	ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS
2606				 |CLRLQOBUSFREE|CLRLQOPHACHGINPKT);
2607	if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
2608		ahd_outb(ahd, CLRLQOINT0, 0);
2609		ahd_outb(ahd, CLRLQOINT1, 0);
2610	}
2611	ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR);
2612	ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
2613				|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
2614	ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
2615			        |CLRIOERR|CLROVERRUN);
2616	ahd_outb(ahd, CLRINT, CLRSCSIINT);
2617}
2618
2619/**************************** Debugging Routines ******************************/
2620#ifdef AHD_DEBUG
2621uint32_t ahd_debug = AHD_DEBUG_OPTS;
2622#endif
/*
 * Debug helper: print the hardware SCB fields (control/scsiid/lun,
 * CDB bytes, data pointer/count, S/G pointer, tag) followed by the
 * S/G list for the given SCB.
 */
void
ahd_print_scb(struct scb *scb)
{
	struct hardware_scb *hscb;
	int i;

	hscb = scb->hscb;
	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       (void *)scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	printf("Shared Data: ");
	/* CDB bytes are printed back-to-back with no separators. */
	for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++)
		printf("%#02x", hscb->shared_data.idata.cdb[i]);
	/* dataptr is a 64-bit LE field; print high then low 32 bits. */
	printf("        dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n",
	       (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF),
	       (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF),
	       ahd_le32toh(hscb->datacnt),
	       ahd_le32toh(hscb->sgptr),
	       SCB_GET_TAG(scb));
	ahd_dump_sglist(scb);
}
2647
2648void
2649ahd_dump_sglist(struct scb *scb)
2650{
2651	int i;
2652
2653	if (scb->sg_count > 0) {
2654		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
2655			struct ahd_dma64_seg *sg_list;
2656
2657			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
2658			for (i = 0; i < scb->sg_count; i++) {
2659				uint64_t addr;
2660				uint32_t len;
2661
2662				addr = ahd_le64toh(sg_list[i].addr);
2663				len = ahd_le32toh(sg_list[i].len);
2664				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2665				       i,
2666				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
2667				       (uint32_t)(addr & 0xFFFFFFFF),
2668				       sg_list[i].len & AHD_SG_LEN_MASK,
2669				       (sg_list[i].len & AHD_DMA_LAST_SEG)
2670				     ? " Last" : "");
2671			}
2672		} else {
2673			struct ahd_dma_seg *sg_list;
2674
2675			sg_list = (struct ahd_dma_seg*)scb->sg_list;
2676			for (i = 0; i < scb->sg_count; i++) {
2677				uint32_t len;
2678
2679				len = ahd_le32toh(sg_list[i].len);
2680				printf("sg[%d] - Addr 0x%x%x : Length %d%s\n",
2681				       i,
2682				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
2683				       ahd_le32toh(sg_list[i].addr),
2684				       len & AHD_SG_LEN_MASK,
2685				       len & AHD_DMA_LAST_SEG ? " Last" : "");
2686			}
2687		}
2688	}
2689}
2690
2691/************************* Transfer Negotiation *******************************/
2692/*
2693 * Allocate per target mode instance (ID we respond to as a target)
2694 * transfer negotiation data structures.
2695 */
2696static struct ahd_tmode_tstate *
2697ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel)
2698{
2699	struct ahd_tmode_tstate *master_tstate;
2700	struct ahd_tmode_tstate *tstate;
2701	int i;
2702
2703	master_tstate = ahd->enabled_targets[ahd->our_id];
2704	if (ahd->enabled_targets[scsi_id] != NULL
2705	 && ahd->enabled_targets[scsi_id] != master_tstate)
2706		panic("%s: ahd_alloc_tstate - Target already allocated",
2707		      ahd_name(ahd));
2708	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT | M_ZERO);
2709	if (tstate == NULL)
2710		return (NULL);
2711
2712	/*
2713	 * If we have allocated a master tstate, copy user settings from
2714	 * the master tstate (taken from SRAM or the EEPROM) for this
2715	 * channel, but reset our current and goal settings to async/narrow
2716	 * until an initiator talks to us.
2717	 */
2718	if (master_tstate != NULL) {
2719		memcpy(tstate, master_tstate, sizeof(*tstate));
2720		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
2721		for (i = 0; i < 16; i++) {
2722			memset(&tstate->transinfo[i].curr, 0,
2723			      sizeof(tstate->transinfo[i].curr));
2724			memset(&tstate->transinfo[i].goal, 0,
2725			      sizeof(tstate->transinfo[i].goal));
2726		}
2727	} else
2728		memset(tstate, 0, sizeof(*tstate));
2729	ahd->enabled_targets[scsi_id] = tstate;
2730	return (tstate);
2731}
2732
2733#ifdef AHD_TARGET_MODE
2734/*
2735 * Free per target mode instance (ID we respond to as a target)
2736 * transfer negotiation data structures.
2737 */
2738static void
2739ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
2740{
2741	struct ahd_tmode_tstate *tstate;
2742
2743	/*
2744	 * Don't clean up our "master" tstate.
2745	 * It has our default user settings.
2746	 */
2747	if (scsi_id == ahd->our_id
2748	 && force == FALSE)
2749		return;
2750
2751	tstate = ahd->enabled_targets[scsi_id];
2752	if (tstate != NULL)
2753		free(tstate, M_DEVBUF);
2754	ahd->enabled_targets[scsi_id] = NULL;
2755}
2756#endif
2757
2758/*
2759 * Called when we have an active connection to a target on the bus,
2760 * this function finds the nearest period to the input period limited
2761 * by the capabilities of the bus connectivity of and sync settings for
2762 * the target.
2763 */
2764void
2765ahd_devlimited_syncrate(struct ahd_softc *ahd,
2766			struct ahd_initiator_tinfo *tinfo,
2767			u_int *period, u_int *ppr_options, role_t role)
2768{
2769	struct	ahd_transinfo *transinfo;
2770	u_int	maxsync;
2771
2772	if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
2773	 && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
2774		maxsync = AHD_SYNCRATE_PACED;
2775	} else {
2776		maxsync = AHD_SYNCRATE_ULTRA;
2777		/* Can't do DT related options on an SE bus */
2778		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
2779	}
2780	/*
2781	 * Never allow a value higher than our current goal
2782	 * period otherwise we may allow a target initiated
2783	 * negotiation to go above the limit as set by the
2784	 * user.  In the case of an initiator initiated
2785	 * sync negotiation, we limit based on the user
2786	 * setting.  This allows the system to still accept
2787	 * incoming negotiations even if target initiated
2788	 * negotiation is not performed.
2789	 */
2790	if (role == ROLE_TARGET)
2791		transinfo = &tinfo->user;
2792	else
2793		transinfo = &tinfo->goal;
2794	*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
2795	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
2796		maxsync = MAX(maxsync, AHD_SYNCRATE_ULTRA2);
2797		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2798	}
2799	if (transinfo->period == 0) {
2800		*period = 0;
2801		*ppr_options = 0;
2802	} else {
2803		*period = MAX(*period, transinfo->period);
2804		ahd_find_syncrate(ahd, period, ppr_options, maxsync);
2805	}
2806}
2807
2808/*
2809 * Look up the valid period to SCSIRATE conversion in our table.
2810 * Return the period and offset that should be sent to the target
2811 * if this was the beginning of an SDTR.
2812 */
2813void
2814ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
2815		  u_int *ppr_options, u_int maxsync)
2816{
2817	if (*period < maxsync)
2818		*period = maxsync;
2819
2820	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
2821	 && *period > AHD_SYNCRATE_MIN_DT)
2822		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
2823
2824	if (*period > AHD_SYNCRATE_MIN)
2825		*period = 0;
2826
2827	/* Honor PPR option conformance rules. */
2828	if (*period > AHD_SYNCRATE_PACED)
2829		*ppr_options &= ~MSG_EXT_PPR_RTI;
2830
2831	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
2832		*ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);
2833
2834	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
2835		*ppr_options &= MSG_EXT_PPR_QAS_REQ;
2836
2837	/* Skip all PACED only entries if IU is not available */
2838	if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
2839	 && *period < AHD_SYNCRATE_DT)
2840		*period = AHD_SYNCRATE_DT;
2841
2842	/* Skip all DT only entries if DT is not available */
2843	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
2844	 && *period < AHD_SYNCRATE_ULTRA2)
2845		*period = AHD_SYNCRATE_ULTRA2;
2846}
2847
2848/*
2849 * Truncate the given synchronous offset to a value the
2850 * current adapter type and syncrate are capable of.
2851 */
2852void
2853ahd_validate_offset(struct ahd_softc *ahd,
2854		    struct ahd_initiator_tinfo *tinfo,
2855		    u_int period, u_int *offset, int wide,
2856		    role_t role)
2857{
2858	u_int maxoffset;
2859
2860	/* Limit offset to what we can do */
2861	if (period == 0)
2862		maxoffset = 0;
2863	else if (period <= AHD_SYNCRATE_PACED) {
2864		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
2865			maxoffset = MAX_OFFSET_PACED_BUG;
2866		else
2867			maxoffset = MAX_OFFSET_PACED;
2868	} else
2869		maxoffset = MAX_OFFSET_NON_PACED;
2870	*offset = MIN(*offset, maxoffset);
2871	if (tinfo != NULL) {
2872		if (role == ROLE_TARGET)
2873			*offset = MIN(*offset, tinfo->user.offset);
2874		else
2875			*offset = MIN(*offset, tinfo->goal.offset);
2876	}
2877}
2878
2879/*
2880 * Truncate the given transfer width parameter to a value the
2881 * current adapter type is capable of.
2882 */
2883void
2884ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo,
2885		   u_int *bus_width, role_t role)
2886{
2887	switch (*bus_width) {
2888	default:
2889		if (ahd->features & AHD_WIDE) {
2890			/* Respond Wide */
2891			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2892			break;
2893		}
2894		/* FALLTHROUGH */
2895	case MSG_EXT_WDTR_BUS_8_BIT:
2896		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2897		break;
2898	}
2899	if (tinfo != NULL) {
2900		if (role == ROLE_TARGET)
2901			*bus_width = MIN(tinfo->user.width, *bus_width);
2902		else
2903			*bus_width = MIN(tinfo->goal.width, *bus_width);
2904	}
2905}
2906
2907/*
2908 * Update the bitmask of targets for which the controller should
2909 * negotiate with at the next convenient opportunity.  This currently
2910 * means the next time we send the initial identify messages for
2911 * a new transaction.
2912 */
2913int
2914ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2915		       struct ahd_tmode_tstate *tstate,
2916		       struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type)
2917{
2918	u_int auto_negotiate_orig;
2919
2920	auto_negotiate_orig = tstate->auto_negotiate;
2921	if (neg_type == AHD_NEG_ALWAYS) {
2922		/*
2923		 * Force our "current" settings to be
2924		 * unknown so that unless a bus reset
2925		 * occurs the need to renegotiate is
2926		 * recorded persistently.
2927		 */
2928		if ((ahd->features & AHD_WIDE) != 0)
2929			tinfo->curr.width = AHD_WIDTH_UNKNOWN;
2930		tinfo->curr.period = AHD_PERIOD_UNKNOWN;
2931		tinfo->curr.offset = AHD_OFFSET_UNKNOWN;
2932	}
2933	if (tinfo->curr.period != tinfo->goal.period
2934	 || tinfo->curr.width != tinfo->goal.width
2935	 || tinfo->curr.offset != tinfo->goal.offset
2936	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
2937	 || (neg_type == AHD_NEG_IF_NON_ASYNC
2938	  && (tinfo->goal.offset != 0
2939	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
2940	   || tinfo->goal.ppr_options != 0)))
2941		tstate->auto_negotiate |= devinfo->target_mask;
2942	else
2943		tstate->auto_negotiate &= ~devinfo->target_mask;
2944
2945	return (auto_negotiate_orig != tstate->auto_negotiate);
2946}
2947
2948/*
2949 * Update the user/goal/curr tables of synchronous negotiation
2950 * parameters as well as, in the case of a current or active update,
2951 * any data structures on the host controller.  In the case of an
2952 * active update, the specified target is currently talking to us on
2953 * the bus, so the transfer parameter update must take effect
2954 * immediately.
2955 */
2956void
2957ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2958		 u_int period, u_int offset, u_int ppr_options,
2959		 u_int type, int paused)
2960{
2961	struct	ahd_initiator_tinfo *tinfo;
2962	struct	ahd_tmode_tstate *tstate;
2963	u_int	old_period;
2964	u_int	old_offset;
2965	u_int	old_ppr;
2966	int	active;
2967	int	update_needed;
2968
2969	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
2970	update_needed = 0;
2971
2972	if (period == 0 || offset == 0) {
2973		period = 0;
2974		offset = 0;
2975	}
2976
2977	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
2978				    devinfo->target, &tstate);
2979
2980	if ((type & AHD_TRANS_USER) != 0) {
2981		tinfo->user.period = period;
2982		tinfo->user.offset = offset;
2983		tinfo->user.ppr_options = ppr_options;
2984	}
2985
2986	if ((type & AHD_TRANS_GOAL) != 0) {
2987		tinfo->goal.period = period;
2988		tinfo->goal.offset = offset;
2989		tinfo->goal.ppr_options = ppr_options;
2990	}
2991
2992	old_period = tinfo->curr.period;
2993	old_offset = tinfo->curr.offset;
2994	old_ppr    = tinfo->curr.ppr_options;
2995
2996	if ((type & AHD_TRANS_CUR) != 0
2997	 && (old_period != period
2998	  || old_offset != offset
2999	  || old_ppr != ppr_options)) {
3000
3001		update_needed++;
3002
3003		tinfo->curr.period = period;
3004		tinfo->curr.offset = offset;
3005		tinfo->curr.ppr_options = ppr_options;
3006
3007		ahd_send_async(ahd, devinfo->channel, devinfo->target,
3008			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3009
3010		if (bootverbose) {
3011			if (offset != 0) {
3012				int options;
3013
3014				printf("%s: target %d synchronous with "
3015				       "period = 0x%x, offset = 0x%x",
3016				       ahd_name(ahd), devinfo->target,
3017				       period, offset);
3018				options = 0;
3019				if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
3020					printf("(RDSTRM");
3021					options++;
3022				}
3023				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3024					printf("%s", options ? "|DT" : "(DT");
3025					options++;
3026				}
3027				if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3028					printf("%s", options ? "|IU" : "(IU");
3029					options++;
3030				}
3031				if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
3032					printf("%s", options ? "|RTI" : "(RTI");
3033					options++;
3034				}
3035				if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
3036					printf("%s", options ? "|QAS" : "(QAS");
3037					options++;
3038				}
3039				if (options != 0)
3040					printf(")\n");
3041				else
3042					printf("\n");
3043			} else {
3044				printf("%s: target %d using "
3045				       "asynchronous transfers%s\n",
3046				       ahd_name(ahd), devinfo->target,
3047				       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
3048				     ?  "(QAS)" : "");
3049			}
3050		}
3051	}
3052	/*
3053	 * Always refresh the neg-table to handle the case of the
3054	 * sequencer setting the ENATNO bit for a MK_MESSAGE request.
3055	 * We will always renegotiate in that case if this is a
3056	 * packetized request.  Also manage the busfree expected flag
3057	 * from this common routine so that we catch changes due to
3058	 * WDTR or SDTR messages.
3059	 */
3060	if ((type & AHD_TRANS_CUR) != 0) {
3061		if (!paused)
3062			ahd_pause(ahd);
3063		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3064		if (!paused)
3065			ahd_unpause(ahd);
3066		if (ahd->msg_type != MSG_TYPE_NONE) {
3067			if ((old_ppr & MSG_EXT_PPR_IU_REQ)
3068			 != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
3069#ifdef AHD_DEBUG
3070				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3071					ahd_print_devinfo(ahd, devinfo);
3072					printf("Expecting IU Change busfree\n");
3073				}
3074#endif
3075				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
3076					       |  MSG_FLAG_IU_REQ_CHANGED;
3077			}
3078			if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
3079#ifdef AHD_DEBUG
3080				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3081					printf("PPR with IU_REQ outstanding\n");
3082#endif
3083				ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
3084			}
3085		}
3086	}
3087
3088	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3089						tinfo, AHD_NEG_TO_GOAL);
3090
3091	if (update_needed && active)
3092		ahd_update_pending_scbs(ahd);
3093}
3094
3095/*
3096 * Update the user/goal/curr tables of wide negotiation
3097 * parameters as well as, in the case of a current or active update,
3098 * any data structures on the host controller.  In the case of an
3099 * active update, the specified target is currently talking to us on
3100 * the bus, so the transfer parameter update must take effect
3101 * immediately.
3102 */
3103void
3104ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3105	      u_int width, u_int type, int paused)
3106{
3107	struct	ahd_initiator_tinfo *tinfo;
3108	struct	ahd_tmode_tstate *tstate;
3109	u_int	oldwidth;
3110	int	active;
3111	int	update_needed;
3112
3113	active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
3114	update_needed = 0;
3115	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3116				    devinfo->target, &tstate);
3117
3118	if ((type & AHD_TRANS_USER) != 0)
3119		tinfo->user.width = width;
3120
3121	if ((type & AHD_TRANS_GOAL) != 0)
3122		tinfo->goal.width = width;
3123
3124	oldwidth = tinfo->curr.width;
3125	if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) {
3126
3127		update_needed++;
3128
3129		tinfo->curr.width = width;
3130		ahd_send_async(ahd, devinfo->channel, devinfo->target,
3131			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG, NULL);
3132
3133		if (bootverbose) {
3134			printf("%s: target %d using %dbit transfers\n",
3135			       ahd_name(ahd), devinfo->target,
3136			       8 * (0x01 << width));
3137		}
3138	}
3139
3140	if ((type & AHD_TRANS_CUR) != 0) {
3141		if (!paused)
3142			ahd_pause(ahd);
3143		ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
3144		if (!paused)
3145			ahd_unpause(ahd);
3146	}
3147
3148	update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
3149						tinfo, AHD_NEG_TO_GOAL);
3150	if (update_needed && active)
3151		ahd_update_pending_scbs(ahd);
3152
3153}
3154
3155/*
3156 * Update the current state of tagged queuing for a given target.
3157 */
3158void
3159ahd_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3160	   ahd_queue_alg alg)
3161{
3162	ahd_platform_set_tags(ahd, devinfo, alg);
3163	ahd_send_async(ahd, devinfo->channel, devinfo->target,
3164		       devinfo->lun, AC_TRANSFER_NEG, &alg);
3165}
3166
/*
 * Program the controller's per-target negotiation table entry with
 * the settings in *tinfo.  The NEGOADDR register selects which
 * target's entry the NEGPERIOD/NEGPPROPTS/NEGOFFSET/NEGCONOPTS
 * registers access; both NEGOADDR and the current register modes are
 * restored before returning.
 */
static void
ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
		     struct ahd_transinfo *tinfo)
{
	ahd_mode_state	saved_modes;
	u_int		period;
	u_int		ppr_opts;
	u_int		con_opts;
	u_int		offset;
	u_int		saved_negoaddr;
	uint8_t		iocell_opts[sizeof(ahd->iocell_opts)];

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* Select this target's negotiation table entry. */
	saved_negoaddr = ahd_inb(ahd, NEGOADDR);
	ahd_outb(ahd, NEGOADDR, devinfo->target);
	period = tinfo->period;
	offset = tinfo->offset;
	/* Work on a local copy of the I/O cell options. */
	memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
	/* Only these PPR bits are programmed into the hardware. */
	ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
					|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
	con_opts = 0;
	if (period == 0)
		period = AHD_SYNCRATE_ASYNC;
	if (period == AHD_SYNCRATE_160) {

		if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
			/*
			 * When the SPI4 spec was finalized, PACE transfers
			 * was not made a configurable option in the PPR
			 * message.  Instead it is assumed to be enabled for
			 * any syncrate faster than 80MHz.  Nevertheless,
			 * Harpoon2A4 allows this to be configurable.
			 *
			 * Harpoon2A4 also assumes at most 2 data bytes per
			 * negotiated REQ/ACK offset.  Paced transfers take
			 * 4, so we must adjust our offset.
			 */
			ppr_opts |= PPROPT_PACE;
			offset *= 2;

			/*
			 * Harpoon2A assumed that there would be a
			 * fallback rate between 160 MHz and 80 MHz,
			 * so 7 is used as the period factor rather
			 * than 8 for 160MHz.
			 */
			period = AHD_SYNCRATE_REVA_160;
		}
		/* Drop precomp unless the target asked for it. */
		if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
			iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
			    ~AHD_PRECOMP_MASK;
	} else {
		/*
		 * Precomp should be disabled for non-paced transfers.
		 */
		iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;

		if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
		 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0) {
			/*
			 * Slow down our CRC interval to be
			 * compatible with devices that can't
			 * handle a CRC at full speed.
			 */
			con_opts |= ENSLOWCRC;
		}
	}

	/* Write the (possibly adjusted) I/O cell options to the annex. */
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
	ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
	ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);

	ahd_outb(ahd, NEGPERIOD, period);
	ahd_outb(ahd, NEGPPROPTS, ppr_opts);
	ahd_outb(ahd, NEGOFFSET, offset);

	if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
		con_opts |= WIDEXFER;

	/*
	 * During packetized transfers, the target will
	 * give us the opportunity to send command packets
	 * without us asserting attention.
	 */
	if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
		con_opts |= ENAUTOATNO;
	ahd_outb(ahd, NEGCONOPTS, con_opts);
	ahd_outb(ahd, NEGOADDR, saved_negoaddr);
	ahd_restore_modes(ahd, saved_modes);
}
3260
3261/*
3262 * When the transfer settings for a connection change, setup for
3263 * negotiation in pending SCBs to effect the change as quickly as
3264 * possible.  We also cancel any negotiations that are scheduled
3265 * for inflight SCBs that have not been started yet.
3266 */
3267static void
3268ahd_update_pending_scbs(struct ahd_softc *ahd)
3269{
3270	struct		scb *pending_scb;
3271	int		pending_scb_count;
3272	u_int		scb_tag;
3273	int		paused;
3274	u_int		saved_scbptr;
3275	ahd_mode_state	saved_modes;
3276
3277	/*
3278	 * Traverse the pending SCB list and ensure that all of the
3279	 * SCBs there have the proper settings.  We can only safely
3280	 * clear the negotiation required flag (setting requires the
3281	 * execution queue to be modified) and this is only possible
3282	 * if we are not already attempting to select out for this
3283	 * SCB.  For this reason, all callers only call this routine
3284	 * if we are changing the negotiation settings for the currently
3285	 * active transaction on the bus.
3286	 */
3287	pending_scb_count = 0;
3288	LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
3289		struct ahd_devinfo devinfo;
3290		struct hardware_scb *pending_hscb;
3291		struct ahd_initiator_tinfo *tinfo;
3292		struct ahd_tmode_tstate *tstate;
3293
3294		ahd_scb_devinfo(ahd, &devinfo, pending_scb);
3295		tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
3296					    devinfo.our_scsiid,
3297					    devinfo.target, &tstate);
3298		pending_hscb = pending_scb->hscb;
3299		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
3300		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
3301			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
3302			pending_hscb->control &= ~MK_MESSAGE;
3303		}
3304		ahd_sync_scb(ahd, pending_scb,
3305			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3306		pending_scb_count++;
3307	}
3308
3309	if (pending_scb_count == 0)
3310		return;
3311
3312	if (ahd_is_paused(ahd)) {
3313		paused = 1;
3314	} else {
3315		paused = 0;
3316		ahd_pause(ahd);
3317	}
3318
3319	/*
3320	 * Force the sequencer to reinitialize the selection for
3321	 * the command at the head of the execution queue if it
3322	 * has already been setup.  The negotiation changes may
3323	 * effect whether we select-out with ATN.
3324	 */
3325	saved_modes = ahd_save_modes(ahd);
3326	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3327	ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
3328	saved_scbptr = ahd_get_scbptr(ahd);
3329	/* Ensure that the hscbs down on the card match the new information */
3330	for (scb_tag = 0; scb_tag < ahd->scb_data.maxhscbs; scb_tag++) {
3331		struct	hardware_scb *pending_hscb;
3332		u_int	control;
3333
3334		pending_scb = ahd_lookup_scb(ahd, scb_tag);
3335		if (pending_scb == NULL)
3336			continue;
3337		ahd_set_scbptr(ahd, scb_tag);
3338		pending_hscb = pending_scb->hscb;
3339		control = ahd_inb_scbram(ahd, SCB_CONTROL);
3340		control &= ~MK_MESSAGE;
3341		control |= pending_hscb->control & MK_MESSAGE;
3342		ahd_outb(ahd, SCB_CONTROL, control);
3343	}
3344	ahd_set_scbptr(ahd, saved_scbptr);
3345	ahd_restore_modes(ahd, saved_modes);
3346
3347	if (paused == 0)
3348		ahd_unpause(ahd);
3349}
3350
3351/**************************** Pathing Information *****************************/
/*
 * Gather the addressing information for the current connection from
 * the controller's registers and compile it into *devinfo.  The
 * current register modes are preserved across the call.
 */
static void
ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	ahd_mode_state	saved_modes;
	u_int		saved_scsiid;
	role_t		role;
	int		our_id;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* SSTAT0's TARGET bit tells us which role we are playing. */
	if (ahd_inb(ahd, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	/* Our own ID comes from a role-dependent register. */
	if (role == ROLE_TARGET
	 && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahd_inb(ahd, TARGIDIN) & OID;
	} else if (role == ROLE_TARGET)
		our_id = ahd_inb(ahd, TOWNID);
	else
		our_id = ahd_inb(ahd, IOWNID);

	/* The other end's identity was saved by the sequencer. */
	saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
	ahd_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahd, saved_scsiid),
			    ahd_inb(ahd, SAVED_LUN),
			    SCSIID_CHANNEL(ahd, saved_scsiid),
			    role);
	ahd_restore_modes(ahd, saved_modes);
}
3386
3387void
3388ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3389{
3390	printf("%s:%c:%d:%d: (0x%x) ", ahd_name(ahd), 'A',
3391	       devinfo->target, devinfo->lun, ahd_get_scbptr(ahd));
3392}
3393
3394struct ahd_phase_table_entry*
3395ahd_lookup_phase_entry(int phase)
3396{
3397	struct ahd_phase_table_entry *entry;
3398	struct ahd_phase_table_entry *last_entry;
3399
3400	/*
3401	 * num_phases doesn't include the default entry which
3402	 * will be returned if the phase doesn't match.
3403	 */
3404	last_entry = &ahd_phase_table[num_phases];
3405	for (entry = ahd_phase_table; entry < last_entry; entry++) {
3406		if (phase == entry->phase)
3407			break;
3408	}
3409	return (entry);
3410}
3411
3412void
3413ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
3414		    u_int lun, char channel, role_t role)
3415{
3416	devinfo->our_scsiid = our_id;
3417	devinfo->target = target;
3418	devinfo->lun = lun;
3419	devinfo->target_offset = target;
3420	devinfo->channel = channel;
3421	devinfo->role = role;
3422	if (channel == 'B')
3423		devinfo->target_offset += 8;
3424	devinfo->target_mask = (0x01 << devinfo->target_offset);
3425}
3426
3427static void
3428ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3429		struct scb *scb)
3430{
3431	role_t	role;
3432	int	our_id;
3433
3434	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
3435	role = ROLE_INITIATOR;
3436	if ((scb->hscb->control & TARGET_SCB) != 0)
3437		role = ROLE_TARGET;
3438	ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
3439			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
3440}
3441

/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handing
 * the message phase(s) manually.
 *
 * scb may be NULL when the sequencer requests a message with no
 * transaction associated (a NO-OP is sent in that case).
 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahd->msgout_index = 0;
	ahd->msgout_len = 0;

	if (ahd_currently_packetized(ahd))
		ahd->msg_flags |= MSG_FLAG_PACKETIZED;

	/* A pending parity-error message takes precedence over all else. */
	if (ahd->send_msg_perror
	 && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
		ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf("Setting up for Parity Error delivery\n");
#endif
		return;
	} else if (scb == NULL) {
		/* Nothing to say; satisfy the phase with a NO-OP. */
		printf("%s: WARNING. No pending message for "
		       "I_T msgin.  Issuing NO-OP\n", ahd_name(ahd));
		ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
		ahd->msgout_len++;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		return;
	}

	/*
	 * Non-packetized, non-reset transactions begin with an
	 * identify message (and a tag message when tagged queuing
	 * is in effect for this SCB).
	 */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && (scb->flags & SCB_PACKETIZED) == 0
	 && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
		ahd->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
			ahd->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & SCB_ABORT) != 0) {

		/* Tagged transactions must be aborted with ABORT TAG. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
		} else {
			ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
		}
		ahd->msgout_len++;
		ahd_print_path(ahd, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		ahd_build_transfer_msg(ahd, devinfo);
		/*
		 * Clear our selection hardware in advance of potential
		 * PPR IU status change busfree.  We may have an entry in
		 * the waiting Q for this target, and we don't want to go
		 * about selecting while we handle the busfree and blow
		 * it away.
		 */
		ahd_outb(ahd, SCSISEQ0, 0);
	} else {
		/* MK_MESSAGE was set, but nothing explains why.  Bail. */
		printf("ahd_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
		      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
		      ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
		      scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahd_outb(ahd, SCB_CONTROL,
		 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahd->msgout_index = 0;
	ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}
3566
3567/*
3568 * Build an appropriate transfer negotiation message for the
3569 * currently active target.
3570 */
3571static void
3572ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3573{
3574	/*
3575	 * We need to initiate transfer negotiations.
3576	 * If our current and goal settings are identical,
3577	 * we want to renegotiate due to a check condition.
3578	 */
3579	struct	ahd_initiator_tinfo *tinfo;
3580	struct	ahd_tmode_tstate *tstate;
3581	int	dowide;
3582	int	dosync;
3583	int	doppr;
3584	u_int	period;
3585	u_int	ppr_options;
3586	u_int	offset;
3587
3588	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
3589				    devinfo->target, &tstate);
3590	/*
3591	 * Filter our period based on the current connection.
3592	 * If we can't perform DT transfers on this segment (not in LVD
3593	 * mode for instance), then our decision to issue a PPR message
3594	 * may change.
3595	 */
3596	period = tinfo->goal.period;
3597	offset = tinfo->goal.offset;
3598	ppr_options = tinfo->goal.ppr_options;
3599	/* Target initiated PPR is not allowed in the SCSI spec */
3600	if (devinfo->role == ROLE_TARGET)
3601		ppr_options = 0;
3602	ahd_devlimited_syncrate(ahd, tinfo, &period,
3603				&ppr_options, devinfo->role);
3604	dowide = tinfo->curr.width != tinfo->goal.width;
3605	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
3606	/*
3607	 * Only use PPR if we have options that need it, even if the device
3608	 * claims to support it.  There might be an expander in the way
3609	 * that doesn't.
3610	 */
3611	doppr = ppr_options != 0;
3612
3613	if (!dowide && !dosync && !doppr) {
3614		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
3615		dosync = tinfo->goal.offset != 0;
3616	}
3617
3618	if (!dowide && !dosync && !doppr) {
3619		/*
3620		 * Force async with a WDTR message if we have a wide bus,
3621		 * or just issue an SDTR with a 0 offset.
3622		 */
3623		if ((ahd->features & AHD_WIDE) != 0)
3624			dowide = 1;
3625		else
3626			dosync = 1;
3627
3628		if (bootverbose) {
3629			ahd_print_devinfo(ahd, devinfo);
3630			printf("Ensuring async\n");
3631		}
3632	}
3633	/* Target initiated PPR is not allowed in the SCSI spec */
3634	if (devinfo->role == ROLE_TARGET)
3635		doppr = 0;
3636
3637	/*
3638	 * Both the PPR message and SDTR message require the
3639	 * goal syncrate to be limited to what the target device
3640	 * is capable of handling (based on whether an LVD->SE
3641	 * expander is on the bus), so combine these two cases.
3642	 * Regardless, guarantee that if we are using WDTR and SDTR
3643	 * messages that WDTR comes first.
3644	 */
3645	if (doppr || (dosync && !dowide)) {
3646
3647		offset = tinfo->goal.offset;
3648		ahd_validate_offset(ahd, tinfo, period, &offset,
3649				    doppr ? tinfo->goal.width
3650					  : tinfo->curr.width,
3651				    devinfo->role);
3652		if (doppr) {
3653			ahd_construct_ppr(ahd, devinfo, period, offset,
3654					  tinfo->goal.width, ppr_options);
3655		} else {
3656			ahd_construct_sdtr(ahd, devinfo, period, offset);
3657		}
3658	} else {
3659		ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
3660	}
3661}
3662
3663/*
3664 * Build a synchronous negotiation message in our message
3665 * buffer based on the input parameters.
3666 */
3667static void
3668ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3669		   u_int period, u_int offset)
3670{
3671	if (offset == 0)
3672		period = AHD_ASYNC_XFER_PERIOD;
3673	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3674	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR_LEN;
3675	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_SDTR;
3676	ahd->msgout_buf[ahd->msgout_index++] = period;
3677	ahd->msgout_buf[ahd->msgout_index++] = offset;
3678	ahd->msgout_len += 5;
3679	if (bootverbose) {
3680		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
3681		       ahd_name(ahd), devinfo->channel, devinfo->target,
3682		       devinfo->lun, period, offset);
3683	}
3684}
3685
3686/*
3687 * Build a wide negotiation message in our message
3688 * buffer based on the input parameters.
3689 */
3690static void
3691ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3692		   u_int bus_width)
3693{
3694	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3695	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR_LEN;
3696	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_WDTR;
3697	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3698	ahd->msgout_len += 4;
3699	if (bootverbose) {
3700		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
3701		       ahd_name(ahd), devinfo->channel, devinfo->target,
3702		       devinfo->lun, bus_width);
3703	}
3704}
3705
3706/*
3707 * Build a parallel protocol request message in our message
3708 * buffer based on the input parameters.
3709 */
3710static void
3711ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
3712		  u_int period, u_int offset, u_int bus_width,
3713		  u_int ppr_options)
3714{
3715	/*
3716	 * Always request precompensation from
3717	 * the other target if we are running
3718	 * at paced syncrates.
3719	 */
3720	if (period <= AHD_SYNCRATE_PACED)
3721		ppr_options |= MSG_EXT_PPR_PCOMP_EN;
3722	if (offset == 0)
3723		period = AHD_ASYNC_XFER_PERIOD;
3724	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXTENDED;
3725	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR_LEN;
3726	ahd->msgout_buf[ahd->msgout_index++] = MSG_EXT_PPR;
3727	ahd->msgout_buf[ahd->msgout_index++] = period;
3728	ahd->msgout_buf[ahd->msgout_index++] = 0;
3729	ahd->msgout_buf[ahd->msgout_index++] = offset;
3730	ahd->msgout_buf[ahd->msgout_index++] = bus_width;
3731	ahd->msgout_buf[ahd->msgout_index++] = ppr_options;
3732	ahd->msgout_len += 8;
3733	if (bootverbose) {
3734		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period 0x%x, "
3735		       "offset 0x%x, ppr_options 0x%x\n", ahd_name(ahd),
3736		       devinfo->channel, devinfo->target, devinfo->lun,
3737		       bus_width, period, offset, ppr_options);
3738	}
3739}
3740
3741/*
3742 * Clear any active message state.
3743 */
3744static void
3745ahd_clear_msg_state(struct ahd_softc *ahd)
3746{
3747	ahd_mode_state saved_modes;
3748
3749	saved_modes = ahd_save_modes(ahd);
3750	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
3751	ahd->send_msg_perror = 0;
3752	ahd->msg_flags = MSG_FLAG_NONE;
3753	ahd->msgout_len = 0;
3754	ahd->msgin_index = 0;
3755	ahd->msg_type = MSG_TYPE_NONE;
3756	if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
3757		/*
3758		 * The target didn't care to respond to our
3759		 * message request, so clear ATN.
3760		 */
3761		ahd_outb(ahd, CLRSINT1, CLRATNO);
3762	}
3763	ahd_outb(ahd, MSG_OUT, MSG_NOOP);
3764	ahd_outb(ahd, SEQ_FLAGS2,
3765		 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
3766	ahd_restore_modes(ahd, saved_modes);
3767}
3768
3769/*
3770 * Manual message loop handler.
3771 */
3772static void
3773ahd_handle_message_phase(struct ahd_softc *ahd)
3774{
3775	struct	ahd_devinfo devinfo;
3776	u_int	bus_phase;
3777	int	end_session;
3778
3779	ahd_fetch_devinfo(ahd, &devinfo);
3780	end_session = FALSE;
3781	bus_phase = ahd_inb(ahd, LASTPHASE);
3782
3783	if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
3784		printf("LQIRETRY for LQIPHASE_OUTPKT\n");
3785		ahd_outb(ahd, LQCTL2, LQIRETRY);
3786	}
3787reswitch:
3788	switch (ahd->msg_type) {
3789	case MSG_TYPE_INITIATOR_MSGOUT:
3790	{
3791		int lastbyte;
3792		int phasemis;
3793		int msgdone;
3794
3795		if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
3796			panic("HOST_MSG_LOOP interrupt with no active message");
3797
3798#ifdef AHD_DEBUG
3799		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3800			ahd_print_devinfo(ahd, &devinfo);
3801			printf("INITIATOR_MSG_OUT");
3802		}
3803#endif
3804		phasemis = bus_phase != P_MESGOUT;
3805		if (phasemis) {
3806#ifdef AHD_DEBUG
3807			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3808				printf(" PHASEMIS %s\n",
3809				       ahd_lookup_phase_entry(bus_phase)
3810							     ->phasemsg);
3811			}
3812#endif
3813			if (bus_phase == P_MESGIN) {
3814				/*
3815				 * Change gears and see if
3816				 * this messages is of interest to
3817				 * us or should be passed back to
3818				 * the sequencer.
3819				 */
3820				ahd_outb(ahd, CLRSINT1, CLRATNO);
3821				ahd->send_msg_perror = 0;
3822				ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
3823				ahd->msgin_index = 0;
3824				goto reswitch;
3825			}
3826			end_session = TRUE;
3827			break;
3828		}
3829
3830		if (ahd->send_msg_perror) {
3831			ahd_outb(ahd, CLRSINT1, CLRATNO);
3832			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3833#ifdef AHD_DEBUG
3834			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3835				printf(" byte 0x%x\n", ahd->send_msg_perror);
3836#endif
3837			/*
3838			 * If we are notifying the target of a CRC error
3839			 * during packetized operations, the target is
3840			 * within its rights to acknowledge our message
3841			 * with a busfree.
3842			 */
3843			if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
3844			 && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
3845				ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
3846
3847			ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
3848			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3849			break;
3850		}
3851
3852		msgdone	= ahd->msgout_index == ahd->msgout_len;
3853		if (msgdone) {
3854			/*
3855			 * The target has requested a retry.
3856			 * Re-assert ATN, reset our message index to
3857			 * 0, and try again.
3858			 */
3859			ahd->msgout_index = 0;
3860			ahd_assert_atn(ahd);
3861		}
3862
3863		lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
3864		if (lastbyte) {
3865			/* Last byte is signified by dropping ATN */
3866			ahd_outb(ahd, CLRSINT1, CLRATNO);
3867		}
3868
3869		/*
3870		 * Clear our interrupt status and present
3871		 * the next byte on the bus.
3872		 */
3873		ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3874#ifdef AHD_DEBUG
3875		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3876			printf(" byte 0x%x\n",
3877			       ahd->msgout_buf[ahd->msgout_index]);
3878#endif
3879		ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
3880		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
3881		break;
3882	}
3883	case MSG_TYPE_INITIATOR_MSGIN:
3884	{
3885		int phasemis;
3886		int message_done;
3887
3888#ifdef AHD_DEBUG
3889		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3890			ahd_print_devinfo(ahd, &devinfo);
3891			printf("INITIATOR_MSG_IN");
3892		}
3893#endif
3894		phasemis = bus_phase != P_MESGIN;
3895		if (phasemis) {
3896#ifdef AHD_DEBUG
3897			if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3898				printf(" PHASEMIS %s\n",
3899				       ahd_lookup_phase_entry(bus_phase)
3900							     ->phasemsg);
3901			}
3902#endif
3903			ahd->msgin_index = 0;
3904			if (bus_phase == P_MESGOUT
3905			 && (ahd->send_msg_perror != 0
3906			  || (ahd->msgout_len != 0
3907			   && ahd->msgout_index == 0))) {
3908				ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3909				goto reswitch;
3910			}
3911			end_session = TRUE;
3912			break;
3913		}
3914
3915		/* Pull the byte in without acking it */
3916		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
3917#ifdef AHD_DEBUG
3918		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
3919			printf(" byte 0x%x\n",
3920			       ahd->msgin_buf[ahd->msgin_index]);
3921#endif
3922
3923		message_done = ahd_parse_msg(ahd, &devinfo);
3924
3925		if (message_done) {
3926			/*
3927			 * Clear our incoming message buffer in case there
3928			 * is another message following this one.
3929			 */
3930			ahd->msgin_index = 0;
3931
3932			/*
3933			 * If this message illicited a response,
3934			 * assert ATN so the target takes us to the
3935			 * message out phase.
3936			 */
3937			if (ahd->msgout_len != 0) {
3938#ifdef AHD_DEBUG
3939				if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
3940					ahd_print_devinfo(ahd, &devinfo);
3941					printf("Asserting ATN for response\n");
3942				}
3943#endif
3944				ahd_assert_atn(ahd);
3945			}
3946		} else
3947			ahd->msgin_index++;
3948
3949		if (message_done == MSGLOOP_TERMINATED) {
3950			end_session = TRUE;
3951		} else {
3952			/* Ack the byte */
3953			ahd_outb(ahd, CLRSINT1, CLRREQINIT);
3954			ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
3955		}
3956		break;
3957	}
3958	case MSG_TYPE_TARGET_MSGIN:
3959	{
3960		int msgdone;
3961		int msgout_request;
3962
3963		/*
3964		 * By default, the message loop will continue.
3965		 */
3966		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
3967
3968		if (ahd->msgout_len == 0)
3969			panic("Target MSGIN with no active message");
3970
3971		/*
3972		 * If we interrupted a mesgout session, the initiator
3973		 * will not know this until our first REQ.  So, we
3974		 * only honor mesgout requests after we've sent our
3975		 * first byte.
3976		 */
3977		if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
3978		 && ahd->msgout_index > 0)
3979			msgout_request = TRUE;
3980		else
3981			msgout_request = FALSE;
3982
3983		if (msgout_request) {
3984
3985			/*
3986			 * Change gears and see if
3987			 * this messages is of interest to
3988			 * us or should be passed back to
3989			 * the sequencer.
3990			 */
3991			ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
3992			ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
3993			ahd->msgin_index = 0;
3994			/* Dummy read to REQ for first byte */
3995			ahd_inb(ahd, SCSIDAT);
3996			ahd_outb(ahd, SXFRCTL0,
3997				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
3998			break;
3999		}
4000
4001		msgdone = ahd->msgout_index == ahd->msgout_len;
4002		if (msgdone) {
4003			ahd_outb(ahd, SXFRCTL0,
4004				 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4005			end_session = TRUE;
4006			break;
4007		}
4008
4009		/*
4010		 * Present the next byte on the bus.
4011		 */
4012		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4013		ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
4014		break;
4015	}
4016	case MSG_TYPE_TARGET_MSGOUT:
4017	{
4018		int lastbyte;
4019		int msgdone;
4020
4021		/*
4022		 * By default, the message loop will continue.
4023		 */
4024		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
4025
4026		/*
4027		 * The initiator signals that this is
4028		 * the last byte by dropping ATN.
4029		 */
4030		lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;
4031
4032		/*
4033		 * Read the latched byte, but turn off SPIOEN first
4034		 * so that we don't inadvertently cause a REQ for the
4035		 * next byte.
4036		 */
4037		ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
4038		ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
4039		msgdone = ahd_parse_msg(ahd, &devinfo);
4040		if (msgdone == MSGLOOP_TERMINATED) {
4041			/*
4042			 * The message is *really* done in that it caused
4043			 * us to go to bus free.  The sequencer has already
4044			 * been reset at this point, so pull the ejection
4045			 * handle.
4046			 */
4047			return;
4048		}
4049
4050		ahd->msgin_index++;
4051
4052		/*
4053		 * XXX Read spec about initiator dropping ATN too soon
4054		 *     and use msgdone to detect it.
4055		 */
4056		if (msgdone == MSGLOOP_MSGCOMPLETE) {
4057			ahd->msgin_index = 0;
4058
4059			/*
4060			 * If this message illicited a response, transition
4061			 * to the Message in phase and send it.
4062			 */
4063			if (ahd->msgout_len != 0) {
4064				ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
4065				ahd_outb(ahd, SXFRCTL0,
4066					 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4067				ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
4068				ahd->msgin_index = 0;
4069				break;
4070			}
4071		}
4072
4073		if (lastbyte)
4074			end_session = TRUE;
4075		else {
4076			/* Ask for the next byte. */
4077			ahd_outb(ahd, SXFRCTL0,
4078				 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
4079		}
4080
4081		break;
4082	}
4083	default:
4084		panic("Unknown REQINIT message type");
4085	}
4086
4087	if (end_session) {
4088		if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
4089			printf("%s: Returning to Idle Loop\n",
4090			       ahd_name(ahd));
4091			ahd_clear_msg_state(ahd);
4092
4093			/*
4094			 * Perform the equivalent of a clear_target_state.
4095			 */
4096			ahd_outb(ahd, LASTPHASE, P_BUSFREE);
4097			ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
4098			ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
4099		} else {
4100			ahd_clear_msg_state(ahd);
4101			ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
4102		}
4103	}
4104}
4105
4106/*
4107 * See if we sent a particular extended message to the target.
4108 * If "full" is true, return true only if the target saw the full
4109 * message.  If "full" is false, return true if the target saw at
4110 * least the first byte of the message.
4111 */
4112static int
4113ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
4114{
4115	int found;
4116	u_int index;
4117
4118	found = FALSE;
4119	index = 0;
4120
4121	while (index < ahd->msgout_len) {
4122		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
4123			u_int end_index;
4124
4125			end_index = index + 1 + ahd->msgout_buf[index + 1];
4126			if (ahd->msgout_buf[index+2] == msgval
4127			 && type == AHDMSG_EXT) {
4128
4129				if (full) {
4130					if (ahd->msgout_index > end_index)
4131						found = TRUE;
4132				} else if (ahd->msgout_index > index)
4133					found = TRUE;
4134			}
4135			index = end_index;
4136		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
4137			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
4138
4139			/* Skip tag type and tag id or residue param*/
4140			index += 2;
4141		} else {
4142			/* Single byte message */
4143			if (type == AHDMSG_1B
4144			 && ahd->msgout_index > index
4145			 && (ahd->msgout_buf[index] == msgval
4146			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
4147			   && msgval == MSG_IDENTIFYFLAG)))
4148				found = TRUE;
4149			index++;
4150		}
4151
4152		if (found)
4153			break;
4154	}
4155	return (found);
4156}
4157
4158/*
4159 * Wait for a complete incoming message, parse it, and respond accordingly.
4160 */
static int
ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	/*
	 * Current transfer negotiation info for this target.  tstate is
	 * also needed below for the WDTR renegotiation bookkeeping and
	 * the target-mode abort/event handling.
	 */
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahd->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahd_handle_msg_reject(ahd, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahd->msgin_index < 2)
			break;
		switch (ahd->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			u_int	 period;
			u_int	 ppr_options;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahd->msgin_buf[4];
			/*
			 * Filter the requested rate/offset down to what
			 * this controller and our current width allow.
			 */
			ahd_devlimited_syncrate(ahd, tinfo, &period,
						&ppr_options, devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    tinfo->curr.width, devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahd->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahd_set_syncrate(ahd, devinfo, period,
					 offset, ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_sdtr(ahd, devinfo,
						   period, offset);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahd->msgin_buf[3];
			saved_width = bus_width;
			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers.  Rejecting...\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_wdtr(ahd, devinfo, bus_width);
				ahd->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec.  Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async.  By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahd_update_neg_request(ahd, devinfo, tstate,
					       tinfo, AHD_NEG_ALWAYS);
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * We will always have an SDTR to send.
				 */
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_build_transfer_msg(ahd, devinfo);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			offset = ahd->msgin_buf[5];
			bus_width = ahd->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahd->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period <= 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Transfer options are only available if we
			 * are negotiating wide.
			 */
			if (bus_width == 0)
				ppr_options &= MSG_EXT_PPR_QAS_REQ;

			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			ahd_devlimited_syncrate(ahd, tinfo, &period,
						&ppr_options, devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    bus_width, devinfo->role);

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_ppr(ahd, devinfo, period, offset,
						  bus_width, ppr_options);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahd->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			ahd_set_syncrate(ahd, devinfo, period,
					 offset, ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);

			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHD_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahd_inb(ahd, INITIATOR_TAG);
		ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahd->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahd_queue_lstate_event(ahd, lstate,
						       devinfo->our_scsiid,
						       ahd->msgin_buf[0],
						       /*arg*/tag);
				ahd_send_lstate_events(ahd, lstate);
			}
		}
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_QAS_REQUEST:
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printf("%s: QAS request.  SCSISIGI == 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
		/*
		 * We don't support QAS.  Note that the target may
		 * answer our rejection with a busfree, then fall
		 * through to the common reject path below.
		 */
		ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
		/* FALLTHROUGH */
	case MSG_TERM_IO_PROC:
	default:
		/* Unsupported or unknown message.  Reject it. */
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahd->msgout_index = 0;
		ahd->msgout_len = 1;
		ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahd->msgout_len = 0;

	return (done);
}
4575
4576/*
4577 * Process a message reject message.
4578 */
static int
ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 *
	 * Returns non-zero if we queued a response message for
	 * the target (caller must continue the message-out phase).
	 */
	struct scb *scb;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int   response = 0;

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary (only used in the diagnostic printf below) */
	last_msg = ahd_inb(ahd, LAST_MSG);

	if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
		 && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
			/*
			 * Target may not like our SPI-4 PPR Options.
			 * Attempt to negotiate 80MHz which will turn
			 * off these options.
			 */
			if (bootverbose) {
				printf("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying simple U160 PPR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.period = AHD_SYNCRATE_DT;
			tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
						|  MSG_EXT_PPR_QAS_REQ
						|  MSG_EXT_PPR_DT_REQ;
		} else {
			/*
			 * Target does not support the PPR message.
			 * Attempt to negotiate SPI-2 style.
			 */
			if (bootverbose) {
				printf("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying WDTR/SDTR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.ppr_options = 0;
			tinfo->curr.transport_version = 2;
			tinfo->goal.transport_version = 2;
		}
		/* Rebuild and resend the negotiation from our new goals. */
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
		       "8bit transfers\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {

			/* Start the sync negotiation */
			ahd->msgout_index = 0;
			ahd->msgout_len = 0;
			ahd_build_transfer_msg(ahd, devinfo);
			ahd->msgout_index = 0;
			response = 1;
		}
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahd_set_syncrate(ahd, devinfo, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahd_name(ahd), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		/* The reject was aimed at our queue tag message. */
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands.  "
			       "Performing non-tagged I/O\n", ahd_name(ahd),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahd_set_tags(ahd, devinfo, AHD_QUEUE_NONE);
			/*
			 * NOTE(review): 0x23 appears to be the tag-enable
			 * bit plus the two tag-type bits of SCB control —
			 * confirm against the SCB_CONTROL bit definitions.
			 */
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
			       "Performing simple queue tagged I/O only\n",
			       ahd_name(ahd), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_Q_TAG
			       ? "ordered" : "head of queue");
			ahd_set_tags(ahd, devinfo, AHD_QUEUE_BASIC);
			/* Clear only the tag-type bits; keep simple tags. */
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahd_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
		ahd_assert_atn(ahd);
		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     SCB_GET_TAG(scb));

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
				   SCB_GET_CHANNEL(ahd, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
		/*
		 * Most likely the device believes that we had
		 * previously negotiated packetized.
		 */
		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
			       |  MSG_FLAG_IU_REQ_CHANGED;

		ahd_force_renegotiation(ahd, devinfo);
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}
4746
4747/*
4748 * Process an ignore wide residue message.
4749 */
static void
ahd_handle_ign_wide_residue(struct ahd_softc *ahd,
    struct ahd_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	/*
	 * NOTE(review): this printf fires unconditionally on every
	 * IGNORE WIDE RESIDUE message — consider gating it under
	 * AHD_DEBUG like the other message-phase diagnostics.
	 */
	printf("%s: ahd_handle_ign_wide_residue\n", ahd_name(ahd));

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
	 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
		     & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			uint32_t data_cnt;
			uint64_t data_addr;
			uint32_t sglen;

			/* Pull in the rest of the sgptr */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHD_SG_LEN_MASK;
			}
			data_addr = ahd_inq(ahd, SHADDR);
			/*
			 * Add the invalid byte back into the residual
			 * and step the data address back over it.
			 */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {

					/*
					 * The rewind crossed an S/G element
					 * boundary; restart at the end of
					 * the previous segment.
					 */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le64toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {

					/*
					 * The rewind crossed an S/G element
					 * boundary; restart at the end of
					 * the previous segment.
					 */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le32toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			}
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
			    ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
			    ^ SCB_XFERLEN_ODD);

			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * The FIFO's pointers will be updated if/when the
			 * sequencer re-enters a data phase.
			 */
		}
	}
}
4897
4898
4899/*
4900 * Reinitialize the data pointers for the active transfer
4901 * based on its current residual.
4902 */
static void
ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
{
	struct		 scb *scb;
	ahd_mode_state	 saved_modes;
	u_int		 scb_index;
	u_int		 wait;
	uint32_t	 sgptr;
	uint32_t	 resid;
	uint64_t	 dataptr;

	/* Must be called with one of the data FIFO modes selected. */
	AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
			 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);

	/*
	 * Release and reacquire the FIFO so we
	 * have a clean slate.
	 */
	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	wait = 1000;
	/* Poll for the FIFO to report itself free. */
	while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
		ahd_delay(100);
	if (wait == 0) {
		ahd_print_path(ahd, scb);
		printf("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
		/* Timed out: force a channel reset and clear the count. */
		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* Reattach the FIFO we were using (saved mode 0x11 -> FIFO 1). */
	ahd_outb(ahd, DFFSTAT,
		 ahd_inb(ahd, DFFSTAT)
		| (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));

	/*
	 * Determine initial values for data_addr and data_cnt
	 * for resuming the data phase.
	 */
	sgptr = (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR + 1) << 8)
	      |	ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
	sgptr &= SG_PTR_MASK;

	/* The residual data count is kept as a 24-bit value in SCB ram. */
	resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT);

	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
		struct ahd_dma64_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/* Resume address = end of this segment minus the residual. */
		dataptr = ahd_le64toh(sg->addr)
			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
		ahd_outb(ahd, HADDR + 7, dataptr >> 56);
		ahd_outb(ahd, HADDR + 6, dataptr >> 48);
		ahd_outb(ahd, HADDR + 5, dataptr >> 40);
		ahd_outb(ahd, HADDR + 4, dataptr >> 32);
	} else {
		struct	 ahd_dma_seg *sg;

		sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		dataptr = ahd_le32toh(sg->addr)
			+ (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK)
			- resid;
		/* The high address byte is stored above AHD_SG_LEN_MASK. */
		ahd_outb(ahd, HADDR + 4,
			 (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24);
	}
	/* Low 32 address bits and the 24-bit count, common to both cases. */
	ahd_outb(ahd, HADDR + 3, dataptr >> 24);
	ahd_outb(ahd, HADDR + 2, dataptr >> 16);
	ahd_outb(ahd, HADDR + 1, dataptr >> 8);
	ahd_outb(ahd, HADDR, dataptr);
	ahd_outb(ahd, HCNT + 2, resid >> 16);
	ahd_outb(ahd, HCNT + 1, resid >> 8);
	ahd_outb(ahd, HCNT, resid);
}
4990
4991/*
4992 * Handle the effects of issuing a bus device reset message.
4993 */
4994static void
4995ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
4996		    u_int lun, cam_status status, const char *message,
4997		    int verbose_level)
4998{
4999#ifdef AHD_TARGET_MODE
5000	struct ahd_tmode_tstate* tstate;
5001#endif
5002	int found;
5003
5004	found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
5005			       lun, SCB_LIST_NULL, devinfo->role,
5006			       status);
5007
5008#ifdef AHD_TARGET_MODE
5009	/*
5010	 * Send an immediate notify ccb to all target mord peripheral
5011	 * drivers affected by this action.
5012	 */
5013	tstate = ahd->enabled_targets[devinfo->our_scsiid];
5014	if (tstate != NULL) {
5015		u_int cur_lun;
5016		u_int max_lun;
5017
5018		if (lun != CAM_LUN_WILDCARD) {
5019			cur_lun = 0;
5020			max_lun = AHD_NUM_LUNS - 1;
5021		} else {
5022			cur_lun = lun;
5023			max_lun = lun;
5024		}
5025		for (cur_lun <= max_lun; cur_lun++) {
5026			struct ahd_tmode_lstate* lstate;
5027
5028			lstate = tstate->enabled_luns[cur_lun];
5029			if (lstate == NULL)
5030				continue;
5031
5032			ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
5033					       MSG_BUS_DEV_RESET, /*arg*/0);
5034			ahd_send_lstate_events(ahd, lstate);
5035		}
5036	}
5037#endif
5038
5039	/*
5040	 * Go back to async/narrow transfers and renegotiate.
5041	 */
5042	ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
5043		      AHD_TRANS_CUR, /*paused*/TRUE);
5044	ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
5045			 /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE);
5046
5047	ahd_send_async(ahd, devinfo->channel, devinfo->target,
5048		       lun, AC_SENT_BDR, NULL);
5049
5050	if (message != NULL
5051	 && (verbose_level <= bootverbose))
5052		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd),
5053		       message, devinfo->channel, devinfo->target, found);
5054}
5055
5056#ifdef AHD_TARGET_MODE
5057static void
5058ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
5059		       struct scb *scb)
5060{
5061
5062	/*
5063	 * To facilitate adding multiple messages together,
5064	 * each routine should increment the index and len
5065	 * variables instead of setting them explicitly.
5066	 */
5067	ahd->msgout_index = 0;
5068	ahd->msgout_len = 0;
5069
5070	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
5071		ahd_build_transfer_msg(ahd, devinfo);
5072	else
5073		panic("ahd_intr: AWAITING target message with no message");
5074
5075	ahd->msgout_index = 0;
5076	ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
5077}
5078#endif
5079/**************************** Initialization **********************************/
5080static u_int
5081ahd_sglist_size(struct ahd_softc *ahd)
5082{
5083	bus_size_t list_size;
5084
5085	list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG;
5086	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
5087		list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG;
5088	return (list_size);
5089}
5090
5091/*
5092 * Calculate the optimum S/G List allocation size.  S/G elements used
5093 * for a given transaction must be physically contiguous.  Assume the
5094 * OS will allocate full pages to us, so it doesn't make sense to request
5095 * less than a page.
5096 */
static u_int
ahd_sglist_allocsize(struct ahd_softc *ahd)
{
	bus_size_t sg_list_increment;	/* size of one complete S/G list */
	bus_size_t sg_list_size;	/* candidate allocation size */
	bus_size_t max_list_size;	/* cap on the search below */
	bus_size_t best_list_size;	/* least-wasteful size seen so far */

	/* Start out with the minimum required for AHD_NSEG. */
	sg_list_increment = ahd_sglist_size(ahd);
	sg_list_size = sg_list_increment;

	/* Get us as close as possible to a page in size. */
	while ((sg_list_size + sg_list_increment) <= PAGE_SIZE)
		sg_list_size += sg_list_increment;

	/*
	 * Try to reduce the amount of wastage by allocating
	 * multiple pages.
	 */
	best_list_size = sg_list_size;
	max_list_size = roundup(sg_list_increment, PAGE_SIZE);
	if (max_list_size < 4 * PAGE_SIZE)
		max_list_size = 4 * PAGE_SIZE;
	/* Never allocate more lists than SCBs can ever consume. */
	if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment))
		max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment);
	/*
	 * Grow one list at a time, remembering the size with the
	 * least page-tail wastage; stop early on an exact page fit.
	 */
	while ((sg_list_size + sg_list_increment) <= max_list_size
	   &&  (sg_list_size % PAGE_SIZE) != 0) {
		bus_size_t new_mod;
		bus_size_t best_mod;

		sg_list_size += sg_list_increment;
		new_mod = sg_list_size % PAGE_SIZE;
		best_mod = best_list_size % PAGE_SIZE;
		/* A larger remainder means less unused space per page. */
		if (new_mod > best_mod || new_mod == 0) {
			best_list_size = sg_list_size;
		}
	}
	return (best_list_size);
}
5137
5138int
5139ahd_softc_init(struct ahd_softc *ahd)
5140{
5141
5142	ahd->unpause = 0;
5143	ahd->pause = PAUSE;
5144	return (0);
5145}
5146
5147void
5148ahd_set_unit(struct ahd_softc *ahd, int unit)
5149{
5150	ahd->unit = unit;
5151}
5152
5153void
5154ahd_set_name(struct ahd_softc *ahd, const char *name)
5155{
5156	ahd->name = name;
5157}
5158
5159void
5160ahd_free(struct ahd_softc *ahd)
5161{
5162	int i;
5163
5164	switch (ahd->init_level) {
5165	default:
5166	case 2:
5167		ahd_shutdown(ahd);
5168		TAILQ_REMOVE(&ahd_tailq, ahd, links);
5169		/* FALLTHROUGH */
5170	case 1:
5171		bus_dmamap_unload(ahd->parent_dmat,
5172		    ahd->shared_data_map.dmamap);
5173		bus_dmamap_destroy(ahd->parent_dmat,
5174		    ahd->shared_data_map.dmamap);
5175		bus_dmamem_unmap(ahd->parent_dmat, (void *)ahd->qoutfifo,
5176		    ahd->shared_data_size);
5177		bus_dmamem_free(ahd->parent_dmat,
5178		    &ahd->shared_data_map.dmasegs, ahd->shared_data_map.nseg);
5179		break;
5180	case 0:
5181		break;
5182	}
5183
5184	ahd_platform_free(ahd);
5185	ahd_fini_scbdata(ahd);
5186	for (i = 0; i < AHD_NUM_TARGETS; i++) {
5187		struct ahd_tmode_tstate *tstate;
5188
5189		tstate = ahd->enabled_targets[i];
5190		if (tstate != NULL) {
5191#if AHD_TARGET_MODE
5192			int j;
5193
5194			for (j = 0; j < AHD_NUM_LUNS; j++) {
5195				struct ahd_tmode_lstate *lstate;
5196
5197				lstate = tstate->enabled_luns[j];
5198				if (lstate != NULL) {
5199					xpt_free_path(lstate->path);
5200					free(lstate, M_DEVBUF);
5201				}
5202			}
5203#endif
5204			free(tstate, M_DEVBUF);
5205		}
5206	}
5207#if AHD_TARGET_MODE
5208	if (ahd->black_hole != NULL) {
5209		xpt_free_path(ahd->black_hole->path);
5210		free(ahd->black_hole, M_DEVBUF);
5211	}
5212#endif
5213	if (ahd->seep_config != NULL)
5214		free(ahd->seep_config, M_DEVBUF);
5215	if (ahd->saved_stack != NULL)
5216		free(ahd->saved_stack, M_DEVBUF);
5217#ifndef __FreeBSD__
5218	free(ahd, M_DEVBUF);
5219#endif
5220	return;
5221}
5222
5223void
5224ahd_shutdown(void *arg)
5225{
5226	struct	ahd_softc *ahd;
5227
5228	ahd = arg;
5229
5230#ifdef AHD_DEBUG
5231	printf("%s: ahd_shutdown\n", ahd_name(ahd));
5232#endif
5233	/*
5234	 * Stop periodic timer callbacks.
5235	 */
5236	ahd_timer_stop(&ahd->reset_timer);
5237	ahd_timer_stop(&ahd->stat_timer);
5238
5239	/* This will reset most registers to 0, but not all */
5240	ahd_reset(ahd, /*reinit*/FALSE);
5241}
5242
5243/*
5244 * Reset the controller and record some information about it
5245 * that is only available just after a reset.  If "reinit" is
5246 * non-zero, this reset occurred after initial configuration
5247 * and the caller requests that the chip be fully reinitialized
5248 * to a runable state.  Chip interrupts are *not* enabled after
5249 * a reinitialization.  The caller must enable interrupts via
5250 * ahd_intr_enable().
5251 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int	 sxfrctl1;
	int	 wait;
	uint32_t cmd;
	struct ahd_pci_busdata	*bd = ahd->bus_data;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	/* Save the PCI command/status word so it can be restored below. */
	cmd = pci_conf_read(bd->pc, bd->tag, PCI_COMMAND_STATUS_REG);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd &
		    ~(PCI_COMMAND_PARITY_ENABLE|PCI_COMMAND_SERR_ENABLE);
		pci_conf_write(bd->pc, bd->tag,
		    PCI_COMMAND_STATUS_REG, mod_cmd);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahd_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
	}
	/* Drop CHIPRST but keep the chip paused. */
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
		 */
		pci_conf_write(bd->pc, bd->tag, PCI_COMMAND_STATUS_REG, cmd |
		    (PCI_STATUS_PARITY_ERROR | PCI_STATUS_TARGET_TARGET_ABORT |
		     PCI_STATUS_MASTER_TARGET_ABORT | PCI_STATUS_MASTER_ABORT |
		     PCI_STATUS_SPECIAL_ERROR));
	}

	/*
	 * Mode should be SCSI after a chip reset, but let's
	 * set it just to be safe.  We touch the MODE_PTR
	 * register directly so as to bypass the lazy update
	 * code in ahd_set_modes().
	 */
	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, MODE_PTR,
		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));

	/*
	 * Restore SXFRCTL1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1);

	/* Determine chip configuration */
	ahd->features &= ~AHD_WIDE;
	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
		ahd->features |= AHD_WIDE;

	/*
	 * If a recovery action has forced a chip reset,
	 * re-initialize the chip to our liking.
	 */
	if (reinit != 0)
		ahd_chip_init(ahd);

	return (0);
}
5354
5355/*
5356 * Determine the number of SCBs available on the controller
5357 */
int
ahd_probe_scbs(struct ahd_softc *ahd) {
	int i;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	/*
	 * Write each candidate SCB's own index into its ram and verify
	 * the value reads back.  A failed readback, or an alias that
	 * clobbers SCB 0, marks the end of real SCB storage.
	 */
	for (i = 0; i < AHD_SCB_MAX; i++) {
		int j;
		int ret;

		ahd_set_scbptr(ahd, i);
		ahd_outw(ahd, SCB_BASE, i);
		for (j = 2; j < 64; j++)
			ahd_outb(ahd, SCB_BASE+j, 0);
		/* Start out life as unallocated (needing an abort) */
		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
		ret = ahd_inw_scbram(ahd, SCB_BASE);
		if (ret != i) {
			printf("%s: ahd_probe_scbs (!=%d): returned 0x%x\n",
			    ahd_name(ahd), i, ret);
			break;
		}
		/* Writing SCB i must not have aliased onto SCB 0. */
		ahd_set_scbptr(ahd, 0);
		ret = ahd_inw_scbram(ahd, SCB_BASE);
		if (ret != 0) {
			printf("ahd_probe_scbs (non zero): returned 0x%x\n",
			    ret);
			break;
		}
	}
	/* i is the count of SCBs that passed both checks. */
	return (i);
}
5390
5391static void
5392ahd_initialize_hscbs(struct ahd_softc *ahd)
5393{
5394	int i;
5395
5396	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
5397		ahd_set_scbptr(ahd, i);
5398
5399		/* Clear the control byte. */
5400		ahd_outb(ahd, SCB_CONTROL, 0);
5401
5402		/* Set the next pointer */
5403		ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL);
5404	}
5405}
5406
/*
 * Initialize SCB bookkeeping: free lists, hardware SCB probe, and the
 * initial batch of SCB allocations.  Returns 0 on success, ENXIO if
 * no SCB ram was found, ENOMEM if no SCBs could be allocated.
 */
static int
ahd_init_scbdata(struct ahd_softc *ahd)
{
	struct	scb_data *scb_data;
	int	i;

	scb_data = &ahd->scb_data;
	TAILQ_INIT(&scb_data->free_scbs);
	for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++)
		LIST_INIT(&scb_data->free_scb_lists[i]);
	LIST_INIT(&scb_data->any_dev_free_scb_list);
	SLIST_INIT(&scb_data->hscb_maps);
	SLIST_INIT(&scb_data->sg_maps);
	SLIST_INIT(&scb_data->sense_maps);

	/* Determine the number of hardware SCBs and initialize them */
	scb_data->maxhscbs = ahd_probe_scbs(ahd);
	if (scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahd_name(ahd));
		return (ENXIO);
	}
	ahd_initialize_hscbs(ahd);

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* Perform initial CCB allocation */
	ahd_alloc_scbs(ahd);

	if (scb_data->numscbs == 0) {
		printf("%s: ahd_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahd_name(ahd));
		goto error_exit;
	}

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:

	return (ENOMEM);
}
5459
5460static struct scb *
5461ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag)
5462{
5463	struct scb *scb;
5464
5465	/*
5466	 * Look on the pending list.
5467	 */
5468	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
5469		if (SCB_GET_TAG(scb) == tag)
5470			return (scb);
5471	}
5472
5473	/*
5474	 * Then on all of the collision free lists.
5475	 */
5476	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
5477		struct scb *list_scb;
5478
5479		list_scb = scb;
5480		do {
5481			if (SCB_GET_TAG(list_scb) == tag)
5482				return (list_scb);
5483			list_scb = LIST_NEXT(list_scb, collision_links);
5484		} while (list_scb);
5485	}
5486
5487	/*
5488	 * And finally on the generic free list.
5489	 */
5490	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
5491		if (SCB_GET_TAG(scb) == tag)
5492			return (scb);
5493	}
5494
5495	return (NULL);
5496}
5497
/*
 * Release all SCB-related DMA memory.  The switch falls through from
 * the highest init_level reached, so only resources that were actually
 * created are freed, in reverse order of creation.
 */
static void
ahd_fini_scbdata(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;

	scb_data = &ahd->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 3:
	{
		struct map_node *sns_map;

		/* Free sense buffer pages (allocated PAGE_SIZE at a time). */
		while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sense_maps, links);
			ahd_freedmamem(ahd->parent_dmat, PAGE_SIZE,
				       sns_map->dmamap, (void *)sns_map->vaddr,
				       &sns_map->dmasegs, sns_map->nseg);
			free(sns_map, M_DEVBUF);
		}
		/* FALLTHROUGH */
	}
	case 2:
	{
		struct map_node *sg_map;

		/* Free S/G list chunks (sized by ahd_sglist_allocsize). */
		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahd_freedmamem(ahd->parent_dmat,
				       ahd_sglist_allocsize(ahd),
				       sg_map->dmamap, (void *)sg_map->vaddr,
				       &sg_map->dmasegs, sg_map->nseg);
			free(sg_map, M_DEVBUF);
		}
		/* FALLTHROUGH */
	}
	case 1:
	{
		struct map_node *hscb_map;

		/* Free hardware SCB pages. */
		while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links);
			ahd_freedmamem(ahd->parent_dmat, PAGE_SIZE,
				       hscb_map->dmamap,
				       (void *)hscb_map->vaddr,
				       &hscb_map->dmasegs, hscb_map->nseg);
			free(hscb_map, M_DEVBUF);
		}
		/* FALLTHROUGH */
	}
	case 0:
		break;
	}
}
5554
5555/*
5556 * DSP filter Bypass must be enabled until the first selection
5557 * after a change in bus mode (Razor #491 and #493).
5558 */
static void
ahd_setup_iocell_workaround(struct ahd_softc *ahd)
{
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	/* Enable the DSP filter bypass and disable offset adjustment. */
	ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL)
	       | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS);
	/* Interrupt on (re)selection so we know when to undo the bypass. */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI));
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: Setting up iocell workaround\n", ahd_name(ahd));
#endif
	ahd_restore_modes(ahd, saved_modes);
	/* Mark that no selection has occurred since the bus mode change. */
	ahd->flags &= ~AHD_HAD_FIRST_SEL;
}
5576
/*
 * Undo the iocell workaround on the first selection after a bus mode
 * change (see ahd_setup_iocell_workaround).  No-op on later calls.
 */
static void
ahd_iocell_first_selection(struct ahd_softc *ahd)
{
	ahd_mode_state	saved_modes;
	u_int		sblkctl;

	if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0)
		return;
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sblkctl = ahd_inb(ahd, SBLKCTL);
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printf("%s: iocell first selection\n", ahd_name(ahd));
#endif
	if ((sblkctl & ENAB40) != 0) {
		/* Bus is in LVD mode; drop the filter bypass. */
		ahd_outb(ahd, DSPDATACTL,
			 ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printf("%s: BYPASS now disabled\n", ahd_name(ahd));
#endif
	}
	/* Stop interrupting on selections and clear the pending status. */
	ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI));
	ahd_outb(ahd, CLRINT, CLRSCSIINT);
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags |= AHD_HAD_FIRST_SEL;
}
5606
5607/*************************** SCB Management ***********************************/
/*
 * Place an SCB on the collision free list for col_idx.  Only the head
 * of each per-column list appears on the free_scbs tailq; later SCBs
 * are chained behind the head via collision_links.
 */
static void
ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
{
	struct	scb_list *free_list;
	struct	scb_tailq *free_tailq;
	struct	scb *first_scb;

	scb->flags |= SCB_ON_COL_LIST;
	AHD_SET_SCB_COL_IDX(scb, col_idx);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb != NULL) {
		/* Column already represented; chain behind its head. */
		LIST_INSERT_AFTER(first_scb, scb, collision_links);
	} else {
		/* New column head; expose it on the free_scbs tailq too. */
		LIST_INSERT_HEAD(free_list, scb, collision_links);
		TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
	}
}
5627
/*
 * Remove an SCB from its collision free list, preserving the invariant
 * that each non-empty column's head is on the free_scbs tailq.
 */
static void
ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
{
	struct	scb_list *free_list;
	struct	scb_tailq *free_tailq;
	struct	scb *first_scb;
	u_int	col_idx;

	scb->flags &= ~SCB_ON_COL_LIST;
	col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
	free_list = &ahd->scb_data.free_scb_lists[col_idx];
	free_tailq = &ahd->scb_data.free_scbs;
	first_scb = LIST_FIRST(free_list);
	if (first_scb == scb) {
		struct scb *next_scb;

		/*
		 * Maintain order in the collision free
		 * lists for fairness if this device has
		 * other colliding tags active.
		 */
		next_scb = LIST_NEXT(scb, collision_links);
		if (next_scb != NULL) {
			/* Promote the successor to column head. */
			TAILQ_INSERT_AFTER(free_tailq, scb,
					   next_scb, links.tqe);
		}
		TAILQ_REMOVE(free_tailq, scb, links.tqe);
	}
	LIST_REMOVE(scb, collision_links);
}
5658
5659/*
5660 * Get a free scb. If there are none, see if we can allocate a new SCB.
5661 */
struct scb *
ahd_get_scb(struct ahd_softc *ahd, u_int col_idx)
{
	struct scb *scb;
	/*
	 * Prefer an SCB parked on a collision list whose column differs
	 * from the requested one, so its tag cannot collide with ours.
	 */
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
			ahd_rem_col_list(ahd, scb);
			goto found;
		}
	}
	/* Otherwise fall back to the generic free list. */
	if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL)
		return (NULL);
	LIST_REMOVE(scb, links.le);
	/*
	 * If our partner SCB is idle, park it on the collision list for
	 * this column so it is not handed out while our tag is active.
	 */
	if (col_idx != AHD_NEVER_COL_IDX
	 && (scb->col_scb != NULL)
	 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
		LIST_REMOVE(scb->col_scb, links.le);
		ahd_add_col_list(ahd, scb->col_scb, col_idx);
	}
found:
	scb->flags |= SCB_ACTIVE;
	return (scb);
}
5685
5686/*
5687 * Return an SCB resource to the free list.
5688 */
void
ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
{

	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;
	scb->hscb->control = 0;
	ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;

	/* Decide which free list this SCB (and possibly its partner) joins. */
	if (scb->col_scb == NULL) {

		/*
		 * No collision possible.  Just free normally.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	} else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {

		/*
		 * The SCB we might have collided with is on
		 * a free collision list.  Put both SCBs on
		 * the generic list.
		 */
		ahd_rem_col_list(ahd, scb->col_scb);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb->col_scb, links.le);
	} else if ((scb->col_scb->flags
		  & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE
		&& (scb->col_scb->hscb->control & TAG_ENB) != 0) {

		/*
		 * The SCB we might collide with on the next allocation
		 * is still active in a non-packetized, tagged, context.
		 * Put us on the SCB collision list.
		 */
		ahd_add_col_list(ahd, scb,
				 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
	} else {
		/*
		 * The SCB we might collide with on the next allocation
		 * is either active in a packetized context, or free.
		 * Since we can't collide, put this SCB on the generic
		 * free list.
		 */
		LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list,
				 scb, links.le);
	}

	ahd_platform_scb_free(ahd, scb);
}
5741
/*
 * Allocate a batch of SCBs along with their hardware SCB, S/G list,
 * and sense buffer backing store.  Memory left over from previous
 * batches is consumed first; otherwise a new DMA-safe chunk of each
 * resource is created.  Returns the number of SCBs added (0 on any
 * allocation failure or when the AHD_SCB_MAX_ALLOC cap is reached).
 */
int
ahd_alloc_scbs(struct ahd_softc *ahd)
{
	struct scb_data *scb_data;
	struct scb	*next_scb;
	struct hardware_scb *hscb;
	struct map_node *hscb_map;
	struct map_node *sg_map;
	struct map_node *sense_map;
	uint8_t		*segs;
	uint8_t		*sense_data;
	bus_addr_t	 hscb_busaddr;
	bus_addr_t	 sg_busaddr;
	bus_addr_t	 sense_busaddr;
	int		 newcount;
	int		 i;

	scb_data = &ahd->scb_data;
	if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return (0);

	KASSERT(scb_data->scbs_left >= 0);
	if (scb_data->scbs_left != 0) {
		int offset;

		/* Use the unconsumed tail of the most recent hscb page. */
		offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left;
		hscb_map = SLIST_FIRST(&scb_data->hscb_maps);
		hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset];
		hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb));
	} else {
		hscb_map = malloc(sizeof(*hscb_map), M_DEVBUF, M_WAITOK);

		if (hscb_map == NULL)
			return (0);

		memset(hscb_map, 0, sizeof(*hscb_map));

		/* Allocate the next batch of hardware SCBs */
		if (ahd_createdmamem(ahd->parent_dmat, PAGE_SIZE,
				     ahd->sc_dmaflags,
				     &hscb_map->dmamap,
				     (void **)&hscb_map->vaddr,
				     &hscb_map->physaddr, &hscb_map->dmasegs,
				     &hscb_map->nseg, ahd_name(ahd),
				     "hardware SCB structures") < 0) {
			free(hscb_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links);

		hscb = (struct hardware_scb *)hscb_map->vaddr;
		hscb_busaddr = hscb_map->physaddr;
		scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb);
	}

	/* init_level 1: hscb storage exists (see ahd_fini_scbdata). */
	scb_data->init_level++;

	if (scb_data->sgs_left != 0) {
		int offset;

		/* Use the unconsumed tail of the most recent S/G chunk. */
		offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd))
		       - scb_data->sgs_left) * ahd_sglist_size(ahd);
		sg_map = SLIST_FIRST(&scb_data->sg_maps);
		segs = sg_map->vaddr + offset;
		sg_busaddr = sg_map->physaddr + offset;
	} else {
		sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_WAITOK);

		if (sg_map == NULL)
			return (0);

		memset(sg_map, 0, sizeof(*sg_map));

		/* Allocate the next batch of S/G lists */
		if (ahd_createdmamem(ahd->parent_dmat,
				     ahd_sglist_allocsize(ahd),
				     ahd->sc_dmaflags,
				     &sg_map->dmamap, (void **)&sg_map->vaddr,
				     &sg_map->physaddr, &sg_map->dmasegs,
				     &sg_map->nseg, ahd_name(ahd),
				     "SG data structures") < 0) {
			free(sg_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

		segs = sg_map->vaddr;
		sg_busaddr = sg_map->physaddr;
		scb_data->sgs_left =
		    ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("%s: ahd_alloc_scbs - Mapped SG data\n",
			    ahd_name(ahd));
#endif
	}

	/* init_level 2: S/G storage exists. */
	scb_data->init_level++;


	if (scb_data->sense_left != 0) {
		int offset;

		/* Use the unconsumed tail of the most recent sense page. */
		offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left);
		sense_map = SLIST_FIRST(&scb_data->sense_maps);
		sense_data = sense_map->vaddr + offset;
		sense_busaddr = sense_map->physaddr + offset;
	} else {
		sense_map = malloc(sizeof(*sense_map), M_DEVBUF, M_WAITOK);

		if (sense_map == NULL)
			return (0);

		memset(sense_map, 0, sizeof(*sense_map));

		/* Allocate the next batch of sense buffers */
		if (ahd_createdmamem(ahd->parent_dmat, PAGE_SIZE,
				     ahd->sc_dmaflags,
				     &sense_map->dmamap,
				     (void **)&sense_map->vaddr,
				     &sense_map->physaddr, &sense_map->dmasegs,
				     &sense_map->nseg, ahd_name(ahd),
				     "Sense Data structures") < 0) {
			free(sense_map, M_DEVBUF);
			return (0);
		}

		SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links);

		sense_data = sense_map->vaddr;
		sense_busaddr = sense_map->physaddr;
		scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_MEMORY)
			printf("%s: ahd_alloc_scbs - Mapped sense data\n",
			    ahd_name(ahd));
#endif
	}

	/* init_level 3: sense storage exists. */
	scb_data->init_level++;

	/* Build only as many SCBs as all three resources can back. */
	newcount = MIN(scb_data->sense_left, scb_data->scbs_left);
	newcount = MIN(newcount, scb_data->sgs_left);
	newcount = MIN(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs));
	scb_data->sense_left -= newcount;
	scb_data->scbs_left -= newcount;
	scb_data->sgs_left -= newcount;

	for (i = 0; i < newcount; i++) {
		u_int col_tag;

		struct scb_platform_data *pdata;
#ifndef __linux__
		int error;
#endif
		next_scb = malloc(sizeof(*next_scb), M_DEVBUF, M_WAITOK);
		if (next_scb == NULL)
			break;

		pdata = malloc(sizeof(*pdata), M_DEVBUF, M_WAITOK);
		if (pdata == NULL) {
			free(next_scb, M_DEVBUF);
			break;
		}
		next_scb->platform_data = pdata;
		next_scb->hscb_map = hscb_map;
		next_scb->sg_map = sg_map;
		next_scb->sense_map = sense_map;
		next_scb->sg_list = segs;
		next_scb->sense_data = sense_data;
		next_scb->sense_busaddr = sense_busaddr;
		memset(hscb, 0, sizeof(*hscb));
		next_scb->hscb = hscb;
		hscb->hscb_busaddr = ahd_htole32(hscb_busaddr);
		KASSERT((vaddr_t)hscb >= (vaddr_t)hscb_map->vaddr &&
			(vaddr_t)hscb < (vaddr_t)hscb_map->vaddr + PAGE_SIZE);

		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_busaddr = sg_busaddr;
		if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
			next_scb->sg_list_busaddr
			    += sizeof(struct ahd_dma64_seg);
		else
			next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
		next_scb->ahd_softc = ahd;
		next_scb->flags = SCB_FLAG_NONE;

		error = bus_dmamap_create(ahd->parent_dmat,
					  AHD_MAXTRANSFER_SIZE, AHD_NSEG,
					  MAXBSIZE, 0,
					  BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW|
					  ahd->sc_dmaflags,
					  &next_scb->dmamap);
		if (error != 0) {
			free(next_scb, M_DEVBUF);
			free(pdata, M_DEVBUF);
			break;
		}
		/* Pair each SCB with its tag^0x100 partner for collisions. */
		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
		col_tag = scb_data->numscbs ^ 0x100;
		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
		if (next_scb->col_scb != NULL)
			next_scb->col_scb->col_scb = next_scb;
		/* ahd_free_scb places the new SCB on the free lists. */
		ahd_free_scb(ahd, next_scb);
		hscb++;
		hscb_busaddr += sizeof(*hscb);
		segs += ahd_sglist_size(ahd);
		sg_busaddr += ahd_sglist_size(ahd);
		sense_data += AHD_SENSE_BUFSIZE;
		sense_busaddr += AHD_SENSE_BUFSIZE;
		scb_data->numscbs++;
	}
	return (i);
}
5962
5963void
5964ahd_controller_info(struct ahd_softc *ahd, char *tbuf, size_t l)
5965{
5966	const char *speed;
5967	const char *type;
5968	int len;
5969	char *ep;
5970
5971	ep = tbuf + l;
5972
5973	len = snprintf(tbuf, ep - tbuf, "%s: ",
5974	    ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]);
5975	tbuf += len;
5976
5977	speed = "Ultra320 ";
5978	if ((ahd->features & AHD_WIDE) != 0) {
5979		type = "Wide ";
5980	} else {
5981		type = "Single ";
5982	}
5983	len = snprintf(tbuf, ep - tbuf, "%s%sChannel %c, SCSI Id=%d, ",
5984		      speed, type, ahd->channel, ahd->our_id);
5985	tbuf += len;
5986
5987	snprintf(tbuf, ep - tbuf, "%s, %d SCBs", ahd->bus_description,
5988		ahd->scb_data.maxhscbs);
5989}
5990
/* Names of the four physical bus segments (used in status reports). */
static const char *channel_strings[] = {
	"Primary Low",
	"Primary High",
	"Secondary Low",
	"Secondary High"
};
5997
/* Human-readable termination status descriptions. */
static const char *termstat_strings[] = {
	"Terminated Correctly",
	"Over Terminated",
	"Under Terminated",
	"Not Configured"
};
6004
6005/*
6006 * Start the board, ready for normal operation
6007 */
6008int
6009ahd_init(struct ahd_softc *ahd)
6010{
6011	uint8_t		*next_vaddr;
6012	bus_addr_t	 next_baddr;
6013	size_t		 driver_data_size;
6014	int		 i;
6015	int		 error;
6016	u_int		 warn_user;
6017	uint8_t		 current_sensing;
6018	uint8_t		 fstat;
6019
6020	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6021
6022	ahd->stack_size = ahd_probe_stack_size(ahd);
6023	ahd->saved_stack = malloc(ahd->stack_size * sizeof(uint16_t),
6024				  M_DEVBUF, M_NOWAIT);
6025	if (ahd->saved_stack == NULL)
6026		return (ENOMEM);
6027	/* Zero the memory */
6028	memset(ahd->saved_stack, 0, ahd->stack_size * sizeof(uint16_t));
6029
6030	/*
6031	 * Verify that the compiler hasn't over-agressively
6032	 * padded important structures.
6033	 */
6034	if (sizeof(struct hardware_scb) != 64)
6035		panic("Hardware SCB size is incorrect");
6036
6037#ifdef AHD_DEBUG
6038	if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0)
6039		ahd->flags |= AHD_SEQUENCER_DEBUG;
6040#endif
6041
6042	/*
6043	 * Default to allowing initiator operations.
6044	 */
6045	ahd->flags |= AHD_INITIATORROLE;
6046
6047	/*
6048	 * Only allow target mode features if this unit has them enabled.
6049	 */
6050	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
6051		ahd->features &= ~AHD_TARGETMODE;
6052
6053	/*
6054	 * DMA tag for our command fifos and other data in system memory
6055	 * the card's sequencer must be able to access.  For initiator
6056	 * roles, we need to allocate space for the qoutfifo.  When providing
6057	 * for the target mode role, we must additionally provide space for
6058	 * the incoming target command fifo.
6059	 */
6060	driver_data_size = AHD_SCB_MAX * sizeof(uint16_t)
6061			 + sizeof(struct hardware_scb);
6062	if ((ahd->features & AHD_TARGETMODE) != 0)
6063		driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6064	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0)
6065		driver_data_size += PKT_OVERRUN_BUFSIZE;
6066	ahd->shared_data_size = driver_data_size;
6067
6068	memset(&ahd->shared_data_map, 0, sizeof(ahd->shared_data_map));
6069	ahd->sc_dmaflags = BUS_DMA_NOWAIT;
6070
6071	if (ahd_createdmamem(ahd->parent_dmat, ahd->shared_data_size,
6072			     ahd->sc_dmaflags,
6073			     &ahd->shared_data_map.dmamap,
6074			     (void **)&ahd->shared_data_map.vaddr,
6075			     &ahd->shared_data_map.physaddr,
6076			     &ahd->shared_data_map.dmasegs,
6077			     &ahd->shared_data_map.nseg, ahd_name(ahd),
6078			     "shared data") < 0)
6079		return (ENOMEM);
6080	ahd->qoutfifo = (void *) ahd->shared_data_map.vaddr;
6081
6082	ahd->init_level++;
6083
6084	next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE];
6085	next_baddr = ahd->shared_data_map.physaddr +
6086	    AHD_QOUT_SIZE * sizeof(uint16_t);
6087	if ((ahd->features & AHD_TARGETMODE) != 0) {
6088		ahd->targetcmds = (struct target_cmd *)next_vaddr;
6089		next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6090		next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd);
6091	}
6092
6093	if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) {
6094		ahd->overrun_buf = next_vaddr;
6095		next_vaddr += PKT_OVERRUN_BUFSIZE;
6096		next_baddr += PKT_OVERRUN_BUFSIZE;
6097	}
6098
6099	/*
6100	 * We need one SCB to serve as the "next SCB".  Since the
6101	 * tag identifier in this SCB will never be used, there is
6102	 * no point in using a valid HSCB tag from an SCB pulled from
6103	 * the standard free pool.  So, we allocate this "sentinel"
6104	 * specially from the DMA safe memory chunk used for the QOUTFIFO.
6105	 */
6106	ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr;
6107	ahd->next_queued_hscb_map = &ahd->shared_data_map;
6108	ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr);
6109
6110	memset(&ahd->scb_data, 0, sizeof(struct scb_data));
6111
6112	/* Allocate SCB data now that parent_dmat is initialized */
6113	if (ahd_init_scbdata(ahd) != 0)
6114		return (ENOMEM);
6115
6116	if ((ahd->flags & AHD_INITIATORROLE) == 0)
6117		ahd->flags &= ~AHD_RESET_BUS_A;
6118
6119	/*
6120	 * Before committing these settings to the chip, give
6121	 * the OSM one last chance to modify our configuration.
6122	 */
6123	ahd_platform_init(ahd);
6124
6125	/* Bring up the chip. */
6126	ahd_chip_init(ahd);
6127
6128	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6129
6130	if ((ahd->flags & AHD_CURRENT_SENSING) == 0)
6131		goto init_done;
6132
6133	/*
6134	 * Verify termination based on current draw and
6135	 * warn user if the bus is over/under terminated.
6136	 */
6137	error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL,
6138				   CURSENSE_ENB);
6139	if (error != 0) {
6140		printf("%s: current sensing timeout 1\n", ahd_name(ahd));
6141		goto init_done;
6142	}
6143	for (i = 20, fstat = FLX_FSTAT_BUSY;
6144	     (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) {
6145		error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat);
6146		if (error != 0) {
6147			printf("%s: current sensing timeout 2\n",
6148			       ahd_name(ahd));
6149			goto init_done;
6150		}
6151	}
6152	if (i == 0) {
6153		printf("%s: Timedout during current-sensing test\n",
6154		       ahd_name(ahd));
6155		goto init_done;
6156	}
6157
6158	/* Latch Current Sensing status. */
6159	error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing);
6160	if (error != 0) {
6161		printf("%s: current sensing timeout 3\n", ahd_name(ahd));
6162		goto init_done;
6163	}
6164
6165	/* Diable current sensing. */
6166	ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0);
6167
6168#ifdef AHD_DEBUG
6169	if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) {
6170		printf("%s: current_sensing == 0x%x\n",
6171		       ahd_name(ahd), current_sensing);
6172	}
6173#endif
6174	warn_user = 0;
6175	for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) {
6176		u_int term_stat;
6177
6178		term_stat = (current_sensing & FLX_CSTAT_MASK);
6179		switch (term_stat) {
6180		case FLX_CSTAT_OVER:
6181		case FLX_CSTAT_UNDER:
6182			warn_user++;
6183		case FLX_CSTAT_INVALID:
6184		case FLX_CSTAT_OKAY:
6185			if (warn_user == 0 && bootverbose == 0)
6186				break;
6187			printf("%s: %s Channel %s\n", ahd_name(ahd),
6188			       channel_strings[i], termstat_strings[term_stat]);
6189			break;
6190		}
6191	}
6192	if (warn_user) {
6193		printf("%s: WARNING. Termination is not configured correctly.\n"
6194		       "%s: WARNING. SCSI bus operations may FAIL.\n",
6195		       ahd_name(ahd), ahd_name(ahd));
6196	}
6197init_done:
6198	ahd_reset_current_bus(ahd);
6199	ahd_restart(ahd);
6200	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
6201			ahd_stat_timer, ahd);
6202
6203	return (0);
6204}
6205
6206/*
6207 * (Re)initialize chip state after a chip reset.
6208 */
6209static void
6210ahd_chip_init(struct ahd_softc *ahd)
6211{
6212	uint32_t busaddr;
6213	u_int	 sxfrctl1;
6214	u_int	 scsiseq_template;
6215	u_int	 wait;
6216	u_int	 i;
6217	u_int	 target;
6218
6219	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6220	/*
6221	 * Take the LED out of diagnostic mode
6222	 */
6223	ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON));
6224
6225	/*
6226	 * Return HS_MAILBOX to its default value.
6227	 */
6228	ahd->hs_mailbox = 0;
6229	ahd_outb(ahd, HS_MAILBOX, 0);
6230
6231	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */
6232	ahd_outb(ahd, IOWNID, ahd->our_id);
6233	ahd_outb(ahd, TOWNID, ahd->our_id);
6234	sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0;
6235	sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0;
6236	if ((ahd->bugs & AHD_LONG_SETIMO_BUG)
6237	 && (ahd->seltime != STIMESEL_MIN)) {
6238		/*
6239		 * The selection timer duration is twice as long
6240		 * as it should be.  Halve it by adding "1" to
6241		 * the user specified setting.
6242		 */
6243		sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ;
6244	} else {
6245		sxfrctl1 |= ahd->seltime;
6246	}
6247
6248	ahd_outb(ahd, SXFRCTL0, DFON);
6249	ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
6250	ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
6251
6252	/*
6253	 * Now that termination is set, wait for up
6254	 * to 500ms for our transceivers to settle.  If
6255	 * the adapter does not have a cable attached,
6256	 * the transceivers may never settle, so don't
6257	 * complain if we fail here.
6258	 */
6259	for (wait = 10000;
6260	     (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
6261	     wait--)
6262		ahd_delay(100);
6263
6264	/* Clear any false bus resets due to the transceivers settling */
6265	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
6266	ahd_outb(ahd, CLRINT, CLRSCSIINT);
6267
6268	/* Initialize mode specific S/G state. */
6269	for (i = 0; i < 2; i++) {
6270		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
6271		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
6272		ahd_outb(ahd, SG_STATE, 0);
6273		ahd_outb(ahd, CLRSEQINTSRC, 0xFF);
6274		ahd_outb(ahd, SEQIMODE,
6275			 ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT
6276			|ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD);
6277	}
6278
6279	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
6280	ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN);
6281	ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75);
6282	ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN);
6283	ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR);
6284	if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
6285		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE);
6286	} else {
6287		ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE);
6288	}
6289	ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS);
6290	if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX)
6291		/*
6292		 * Do not issue a target abort when a split completion
6293		 * error occurs.  Let our PCIX interrupt handler deal
6294		 * with it instead. H2A4 Razor #625
6295		 */
6296		ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS);
6297
6298	if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0)
6299		ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER);
6300
6301	/*
6302	 * Tweak IOCELL settings.
6303	 */
6304	if ((ahd->flags & AHD_HP_BOARD) != 0) {
6305		for (i = 0; i < NUMDSPS; i++) {
6306			ahd_outb(ahd, DSPSELECT, i);
6307			ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT);
6308		}
6309#ifdef AHD_DEBUG
6310		if ((ahd_debug & AHD_SHOW_MISC) != 0)
6311			printf("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd),
6312			       WRTBIASCTL_HP_DEFAULT);
6313#endif
6314	}
6315	ahd_setup_iocell_workaround(ahd);
6316
6317	/*
6318	 * Enable LQI Manager interrupts.
6319	 */
6320	ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT
6321			      | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI
6322			      | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
6323	ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
6324	/*
6325	 * An interrupt from LQOBUSFREE is made redundant by the
6326	 * BUSFREE interrupt.  We choose to have the sequencer catch
6327	 * LQOPHCHGINPKT errors manually for the command phase at the
6328	 * start of a packetized selection case.
6329		ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT);
6330	 */
6331	ahd_outb(ahd, LQOMODE1, 0);
6332
6333	/*
6334	 * Setup sequencer interrupt handlers.
6335	 */
6336	ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr));
6337	ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr));
6338
6339	/*
6340	 * Setup SCB Offset registers.
6341	 */
6342	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6343		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb,
6344			 pkt_long_lun));
6345	} else {
6346		ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun));
6347	}
6348	ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len));
6349	ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute));
6350	ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management));
6351	ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb,
6352				       shared_data.idata.cdb));
6353	ahd_outb(ahd, QNEXTPTR,
6354		 offsetof(struct hardware_scb, next_hscb_busaddr));
6355	ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET);
6356	ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control));
6357	if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) {
6358		ahd_outb(ahd, LUNLEN,
6359			 sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1);
6360	} else {
6361		ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN);
6362	}
6363	ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1);
6364	ahd_outb(ahd, MAXCMD, 0xFF);
6365	ahd_outb(ahd, SCBAUTOPTR,
6366		 AUSCBPTR_EN | offsetof(struct hardware_scb, tag));
6367
6368	/* We haven't been enabled for target mode yet. */
6369	ahd_outb(ahd, MULTARGID, 0);
6370	ahd_outb(ahd, MULTARGID + 1, 0);
6371
6372	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6373	/* Initialize the negotiation table. */
6374	if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) {
6375		/*
6376		 * Clear the spare bytes in the neg table to avoid
6377		 * spurious parity errors.
6378		 */
6379		for (target = 0; target < AHD_NUM_TARGETS; target++) {
6380			ahd_outb(ahd, NEGOADDR, target);
6381			ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0);
6382			for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++)
6383				ahd_outb(ahd, ANNEXDAT, 0);
6384		}
6385	}
6386
6387	for (target = 0; target < AHD_NUM_TARGETS; target++) {
6388		struct	 ahd_devinfo devinfo;
6389		struct	 ahd_initiator_tinfo *tinfo;
6390		struct	 ahd_tmode_tstate *tstate;
6391
6392		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6393					    target, &tstate);
6394		ahd_compile_devinfo(&devinfo, ahd->our_id,
6395				    target, CAM_LUN_WILDCARD,
6396				    'A', ROLE_INITIATOR);
6397		ahd_update_neg_table(ahd, &devinfo, &tinfo->curr);
6398	}
6399
6400	ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR);
6401	ahd_outb(ahd, CLRINT, CLRSCSIINT);
6402
6403#if NEEDS_MORE_TESTING
6404	/*
6405	 * Always enable abort on incoming L_Qs if this feature is
6406	 * supported.  We use this to catch invalid SCB references.
6407	 */
6408	if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0)
6409		ahd_outb(ahd, LQCTL1, ABORTPENDING);
6410	else
6411#endif
6412		ahd_outb(ahd, LQCTL1, 0);
6413
6414	/* All of our queues are empty */
6415	ahd->qoutfifonext = 0;
6416	ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID_LE;
6417	ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID >> 8);
6418	for (i = 0; i < AHD_QOUT_SIZE; i++)
6419		ahd->qoutfifo[i] = 0;
6420	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD);
6421
6422	ahd->qinfifonext = 0;
6423	for (i = 0; i < AHD_QIN_SIZE; i++)
6424		ahd->qinfifo[i] = SCB_LIST_NULL;
6425
6426	if ((ahd->features & AHD_TARGETMODE) != 0) {
6427		/* All target command blocks start out invalid. */
6428		for (i = 0; i < AHD_TMODE_CMDS; i++)
6429			ahd->targetcmds[i].cmd_valid = 0;
6430		ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD);
6431		ahd->tqinfifonext = 1;
6432		ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1);
6433		ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);
6434	}
6435
6436	/* Initialize Scratch Ram. */
6437	ahd_outb(ahd, SEQ_FLAGS, 0);
6438	ahd_outb(ahd, SEQ_FLAGS2, 0);
6439
6440	/* We don't have any waiting selections */
6441	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
6442	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
6443	for (i = 0; i < AHD_NUM_TARGETS; i++) {
6444		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
6445	}
6446
6447	/*
6448	 * Nobody is waiting to be DMAed into the QOUTFIFO.
6449	 */
6450	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
6451	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
6452	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
6453
6454	/*
6455	 * The Freeze Count is 0.
6456	 */
6457	ahd_outw(ahd, QFREEZE_COUNT, 0);
6458
6459	/*
6460	 * Tell the sequencer where it can find our arrays in memory.
6461	 */
6462	busaddr = ahd->shared_data_map.physaddr;
6463	ahd_outb(ahd, SHARED_DATA_ADDR, busaddr & 0xFF);
6464	ahd_outb(ahd, SHARED_DATA_ADDR + 1, (busaddr >> 8) & 0xFF);
6465	ahd_outb(ahd, SHARED_DATA_ADDR + 2, (busaddr >> 16) & 0xFF);
6466	ahd_outb(ahd, SHARED_DATA_ADDR + 3, (busaddr >> 24) & 0xFF);
6467	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR, busaddr & 0xFF);
6468	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 1, (busaddr >> 8) & 0xFF);
6469	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 2, (busaddr >> 16) & 0xFF);
6470	ahd_outb(ahd, QOUTFIFO_NEXT_ADDR + 3, (busaddr >> 24) & 0xFF);
6471	/*
6472	 * Setup the allowed SCSI Sequences based on operational mode.
6473	 * If we are a target, we'll enable select in operations once
6474	 * we've had a lun enabled.
6475	 */
6476	scsiseq_template = ENAUTOATNP;
6477	if ((ahd->flags & AHD_INITIATORROLE) != 0)
6478		scsiseq_template |= ENRSELI;
6479	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);
6480
6481	/* There are no busy SCBs yet. */
6482	for (target = 0; target < AHD_NUM_TARGETS; target++) {
6483		int lun;
6484
6485		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
6486			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
6487	}
6488
6489	/*
6490	 * Initialize the group code to command length table.
6491	 * Vendor Unique codes are set to 0 so we only capture
6492	 * the first byte of the cdb.  These can be overridden
6493	 * when target mode is enabled.
6494	 */
6495	ahd_outb(ahd, CMDSIZE_TABLE, 5);
6496	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
6497	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
6498	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
6499	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
6500	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
6501	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
6502	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
6503
6504	/* Tell the sequencer of our initial queue positions */
6505	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
6506	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
6507	ahd->qinfifonext = 0;
6508	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
6509	ahd_set_hescb_qoff(ahd, 0);
6510	ahd_set_snscb_qoff(ahd, 0);
6511	ahd_set_sescb_qoff(ahd, 0);
6512	ahd_set_sdscb_qoff(ahd, 0);
6513
6514	/*
6515	 * Tell the sequencer which SCB will be the next one it receives.
6516	 */
6517	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
6518	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
6519	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
6520	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
6521	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
6522
6523	/*
6524	 * Default to coalescing disabled.
6525	 */
6526	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
6527	ahd_outw(ahd, CMDS_PENDING, 0);
6528	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
6529				     ahd->int_coalescing_maxcmds,
6530				     ahd->int_coalescing_mincmds);
6531	ahd_enable_coalescing(ahd, FALSE);
6532
6533	ahd_loadseq(ahd);
6534	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6535}
6536
6537/*
6538 * Setup default device and controller settings.
6539 * This should only be called if our probe has
6540 * determined that no configuration data is available.
6541 */
6542int
6543ahd_default_config(struct ahd_softc *ahd)
6544{
6545	int	targ;
6546
6547	ahd->our_id = 7;
6548
6549	/*
6550	 * Allocate a tstate to house information for our
6551	 * initiator presence on the bus as well as the user
6552	 * data for any target mode initiator.
6553	 */
6554	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6555		printf("%s: unable to allocate ahd_tmode_tstate.  "
6556		       "Failing attach\n", ahd_name(ahd));
6557		return (ENOMEM);
6558	}
6559
6560	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
6561		struct	 ahd_devinfo devinfo;
6562		struct	 ahd_initiator_tinfo *tinfo;
6563		struct	 ahd_tmode_tstate *tstate;
6564		uint16_t target_mask;
6565
6566		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6567					    targ, &tstate);
6568		/*
6569		 * We support SPC2 and SPI4.
6570		 */
6571		tinfo->user.protocol_version = 4;
6572		tinfo->user.transport_version = 4;
6573
6574		target_mask = 0x01 << targ;
6575		ahd->user_discenable |= target_mask;
6576		tstate->discenable |= target_mask;
6577		ahd->user_tagenable |= target_mask;
6578#ifdef AHD_FORCE_160
6579		tinfo->user.period = AHD_SYNCRATE_DT;
6580#else
6581		tinfo->user.period = AHD_SYNCRATE_160;
6582#endif
6583		tinfo->user.offset= MAX_OFFSET;
6584		tinfo->user.ppr_options = MSG_EXT_PPR_RDSTRM
6585					| MSG_EXT_PPR_WRFLOW
6586					| MSG_EXT_PPR_HOLDMCS
6587					| MSG_EXT_PPR_IU_REQ
6588					| MSG_EXT_PPR_QAS_REQ
6589					| MSG_EXT_PPR_DT_REQ;
6590		if ((ahd->features & AHD_RTI) != 0)
6591			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;
6592
6593		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
6594
6595		/*
6596		 * Start out Async/Narrow/Untagged and with
6597		 * conservative protocol support.
6598		 */
6599		tinfo->goal.protocol_version = 2;
6600		tinfo->goal.transport_version = 2;
6601		tinfo->curr.protocol_version = 2;
6602		tinfo->curr.transport_version = 2;
6603		ahd_compile_devinfo(&devinfo, ahd->our_id,
6604				    targ, CAM_LUN_WILDCARD,
6605				    'A', ROLE_INITIATOR);
6606		tstate->tagenable &= ~target_mask;
6607		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6608			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6609		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6610				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6611				 /*paused*/TRUE);
6612	}
6613	return (0);
6614}
6615
6616/*
6617 * Parse device configuration information.
6618 */
6619int
6620ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
6621{
6622	int targ;
6623	int max_targ;
6624
6625	max_targ = sc->max_targets & CFMAXTARG;
6626	ahd->our_id = sc->brtime_id & CFSCSIID;
6627
6628	/*
6629	 * Allocate a tstate to house information for our
6630	 * initiator presence on the bus as well as the user
6631	 * data for any target mode initiator.
6632	 */
6633	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
6634		printf("%s: unable to allocate ahd_tmode_tstate.  "
6635		       "Failing attach\n", ahd_name(ahd));
6636		return (ENOMEM);
6637	}
6638
6639	for (targ = 0; targ < max_targ; targ++) {
6640		struct	 ahd_devinfo devinfo;
6641		struct	 ahd_initiator_tinfo *tinfo;
6642		struct	 ahd_transinfo *user_tinfo;
6643		struct	 ahd_tmode_tstate *tstate;
6644		uint16_t target_mask;
6645
6646		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
6647					    targ, &tstate);
6648		user_tinfo = &tinfo->user;
6649
6650		/*
6651		 * We support SPC2 and SPI4.
6652		 */
6653		tinfo->user.protocol_version = 4;
6654		tinfo->user.transport_version = 4;
6655
6656		target_mask = 0x01 << targ;
6657		ahd->user_discenable &= ~target_mask;
6658		tstate->discenable &= ~target_mask;
6659		ahd->user_tagenable &= ~target_mask;
6660		if (sc->device_flags[targ] & CFDISC) {
6661			tstate->discenable |= target_mask;
6662			ahd->user_discenable |= target_mask;
6663			ahd->user_tagenable |= target_mask;
6664		} else {
6665			/*
6666			 * Cannot be packetized without disconnection.
6667			 */
6668			sc->device_flags[targ] &= ~CFPACKETIZED;
6669		}
6670
6671		user_tinfo->ppr_options = 0;
6672		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
6673		if (user_tinfo->period < CFXFER_ASYNC) {
6674			if (user_tinfo->period <= AHD_PERIOD_10MHz)
6675				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
6676			user_tinfo->offset = MAX_OFFSET;
6677		} else  {
6678			user_tinfo->offset = 0;
6679			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
6680		}
6681#ifdef AHD_FORCE_160
6682		if (user_tinfo->period <= AHD_SYNCRATE_160)
6683			user_tinfo->period = AHD_SYNCRATE_DT;
6684#endif
6685
6686		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
6687			user_tinfo->ppr_options |= MSG_EXT_PPR_RDSTRM
6688						|  MSG_EXT_PPR_WRFLOW
6689						|  MSG_EXT_PPR_HOLDMCS
6690						|  MSG_EXT_PPR_IU_REQ;
6691			if ((ahd->features & AHD_RTI) != 0)
6692				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
6693		}
6694
6695		if ((sc->device_flags[targ] & CFQAS) != 0)
6696			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;
6697
6698		if ((sc->device_flags[targ] & CFWIDEB) != 0)
6699			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
6700		else
6701			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
6702#ifdef AHD_DEBUG
6703		if ((ahd_debug & AHD_SHOW_MISC) != 0)
6704			printf("(%d): %x:%x:%x:%x\n", targ, user_tinfo->width,
6705			       user_tinfo->period, user_tinfo->offset,
6706			       user_tinfo->ppr_options);
6707#endif
6708		/*
6709		 * Start out Async/Narrow/Untagged and with
6710		 * conservative protocol support.
6711		 */
6712		tstate->tagenable &= ~target_mask;
6713		tinfo->goal.protocol_version = 2;
6714		tinfo->goal.transport_version = 2;
6715		tinfo->curr.protocol_version = 2;
6716		tinfo->curr.transport_version = 2;
6717		ahd_compile_devinfo(&devinfo, ahd->our_id,
6718				    targ, CAM_LUN_WILDCARD,
6719				    'A', ROLE_INITIATOR);
6720		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6721			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
6722		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
6723				 /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL,
6724				 /*paused*/TRUE);
6725	}
6726
6727	ahd->flags &= ~AHD_SPCHK_ENB_A;
6728	if (sc->bios_control & CFSPARITY)
6729		ahd->flags |= AHD_SPCHK_ENB_A;
6730
6731	ahd->flags &= ~AHD_RESET_BUS_A;
6732	if (sc->bios_control & CFRESETB)
6733		ahd->flags |= AHD_RESET_BUS_A;
6734
6735	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
6736	if (sc->bios_control & CFEXTEND)
6737		ahd->flags |= AHD_EXTENDED_TRANS_A;
6738
6739	ahd->flags &= ~AHD_BIOS_ENABLED;
6740	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
6741		ahd->flags |= AHD_BIOS_ENABLED;
6742
6743	ahd->flags &= ~AHD_STPWLEVEL_A;
6744	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
6745		ahd->flags |= AHD_STPWLEVEL_A;
6746
6747	return (0);
6748}
6749
6750/*
6751 * Parse device configuration information.
6752 */
6753int
6754ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
6755{
6756	int error;
6757
6758	error = ahd_verify_vpd_cksum(vpd);
6759	if (error == 0)
6760		return (EINVAL);
6761	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
6762		ahd->flags |= AHD_BOOT_CHANNEL;
6763	return (0);
6764}
6765
6766void
6767ahd_intr_enable(struct ahd_softc *ahd, int enable)
6768{
6769	u_int hcntrl;
6770
6771	hcntrl = ahd_inb(ahd, HCNTRL);
6772	hcntrl &= ~INTEN;
6773	ahd->pause &= ~INTEN;
6774	ahd->unpause &= ~INTEN;
6775	if (enable) {
6776		hcntrl |= INTEN;
6777		ahd->pause |= INTEN;
6778		ahd->unpause |= INTEN;
6779	}
6780	ahd_outb(ahd, HCNTRL, hcntrl);
6781}
6782
6783void
6784ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds,
6785			     u_int mincmds)
6786{
6787	if (timer > AHD_TIMER_MAX_US)
6788		timer = AHD_TIMER_MAX_US;
6789	ahd->int_coalescing_timer = timer;
6790
6791	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
6792		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
6793	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
6794		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
6795	ahd->int_coalescing_maxcmds = maxcmds;
6796	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
6797	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
6798	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
6799}
6800
6801void
6802ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
6803{
6804
6805	ahd->hs_mailbox &= ~ENINT_COALESCE;
6806	if (enable)
6807		ahd->hs_mailbox |= ENINT_COALESCE;
6808	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
6809	ahd_flush_device_writes(ahd);
6810	ahd_run_qoutfifo(ahd);
6811}
6812
6813/*
6814 * Ensure that the card is paused in a location
6815 * outside of all critical sections and that all
6816 * pending work is completed prior to returning.
6817 * This routine should only be called from outside
6818 * an interrupt context.
6819 */
6820void
6821ahd_pause_and_flushwork(struct ahd_softc *ahd)
6822{
6823	u_int intstat;
6824	u_int maxloops;
6825	u_int qfreeze_cnt;
6826
6827	maxloops = 1000;
6828	ahd->flags |= AHD_ALL_INTERRUPTS;
6829	ahd_pause(ahd);
6830	/*
6831	 * Increment the QFreeze Count so that the sequencer
6832	 * will not start new selections.  We do this only
6833	 * until we are safely paused without further selections
6834	 * pending.
6835	 */
6836	ahd_outw(ahd, QFREEZE_COUNT, ahd_inw(ahd, QFREEZE_COUNT) + 1);
6837	ahd_outb(ahd, SEQ_FLAGS2, ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
6838	do {
6839		struct scb *waiting_scb;
6840
6841		ahd_unpause(ahd);
6842		ahd_intr(ahd);
6843		ahd_pause(ahd);
6844		ahd_clear_critical_section(ahd);
6845		intstat = ahd_inb(ahd, INTSTAT);
6846		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
6847		if ((ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
6848			ahd_outb(ahd, SCSISEQ0,
6849				 ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
6850		/*
6851		 * In the non-packetized case, the sequencer (for Rev A),
6852		 * relies on ENSELO remaining set after SELDO.  The hardware
6853		 * auto-clears ENSELO in the packetized case.
6854		 */
6855		waiting_scb = ahd_lookup_scb(ahd,
6856					     ahd_inw(ahd, WAITING_TID_HEAD));
6857		if (waiting_scb != NULL
6858		 && (waiting_scb->flags & SCB_PACKETIZED) == 0
6859		 && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0)
6860			ahd_outb(ahd, SCSISEQ0,
6861				 ahd_inb(ahd, SCSISEQ0) | ENSELO);
6862	} while (--maxloops
6863		 && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
6864		 && ((intstat & INT_PEND) != 0
6865		  || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
6866		  || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));
6867	if (maxloops == 0) {
6868		printf("Infinite interrupt loop, INTSTAT = %x",
6869		      ahd_inb(ahd, INTSTAT));
6870	}
6871	qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT);
6872	if (qfreeze_cnt == 0) {
6873		printf("%s: ahd_pause_and_flushwork with 0 qfreeze count!\n",
6874		       ahd_name(ahd));
6875	} else {
6876		qfreeze_cnt--;
6877	}
6878	ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt);
6879	if (qfreeze_cnt == 0)
6880		ahd_outb(ahd, SEQ_FLAGS2,
6881			 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN);
6882
6883	ahd_flush_qoutfifo(ahd);
6884
6885	ahd_platform_flushwork(ahd);
6886	ahd->flags &= ~AHD_ALL_INTERRUPTS;
6887}
6888
6889int
6890ahd_suspend(struct ahd_softc *ahd)
6891{
6892
6893	ahd_pause_and_flushwork(ahd);
6894
6895	if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
6896		ahd_unpause(ahd);
6897		return (EBUSY);
6898	}
6899	ahd_shutdown(ahd);
6900	return (0);
6901}
6902
6903int
6904ahd_resume(struct ahd_softc *ahd)
6905{
6906
6907	ahd_reset(ahd, /*reinit*/TRUE);
6908	ahd_intr_enable(ahd, TRUE);
6909	ahd_restart(ahd);
6910	return (0);
6911}
6912
6913/************************** Busy Target Table *********************************/
6914/*
6915 * Set SCBPTR to the SCB that contains the busy
6916 * table entry for TCL.  Return the offset into
6917 * the SCB that contains the entry for TCL.
6918 * saved_scbid is dereferenced and set to the
6919 * scbid that should be restored once manipualtion
6920 * of the TCL entry is complete.
6921 */
6922static inline u_int
6923ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
6924{
6925	/*
6926	 * Index to the SCB that contains the busy entry.
6927	 */
6928	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
6929	*saved_scbid = ahd_get_scbptr(ahd);
6930	ahd_set_scbptr(ahd, TCL_LUN(tcl)
6931		     | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));
6932
6933	/*
6934	 * And now calculate the SCB offset to the entry.
6935	 * Each entry is 2 bytes wide, hence the
6936	 * multiplication by 2.
6937	 */
6938	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1) + SCB_DISCONNECTED_LISTS);
6939}
6940
6941/*
6942 * Return the untagged transaction id for a given target/channel lun.
6943 */
6944u_int
6945ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
6946{
6947	u_int scbid;
6948	u_int scb_offset;
6949	u_int saved_scbptr;
6950
6951	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
6952	scbid = ahd_inw_scbram(ahd, scb_offset);
6953	ahd_set_scbptr(ahd, saved_scbptr);
6954	return (scbid);
6955}
6956
6957void
6958ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
6959{
6960	u_int scb_offset;
6961	u_int saved_scbptr;
6962
6963	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
6964	ahd_outw(ahd, scb_offset, scbid);
6965	ahd_set_scbptr(ahd, saved_scbptr);
6966}
6967
6968/************************** SCB and SCB queue management **********************/
6969int
6970ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
6971	      char channel, int lun, u_int tag, role_t role)
6972{
6973	int targ = SCB_GET_TARGET(ahd, scb);
6974	char chan = SCB_GET_CHANNEL(ahd, scb);
6975	int slun = SCB_GET_LUN(scb);
6976	int match;
6977
6978	match = ((chan == channel) || (channel == ALL_CHANNELS));
6979	if (match != 0)
6980		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
6981	if (match != 0)
6982		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
6983	if (match != 0) {
6984#if AHD_TARGET_MODE
6985		int group;
6986
6987		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
6988		if (role == ROLE_INITIATOR) {
6989			match = (group != XPT_FC_GROUP_TMODE)
6990			      && ((tag == SCB_GET_TAG(scb))
6991			       || (tag == SCB_LIST_NULL));
6992		} else if (role == ROLE_TARGET) {
6993			match = (group == XPT_FC_GROUP_TMODE)
6994			      && ((tag == scb->io_ctx->csio.tag_id)
6995			       || (tag == SCB_LIST_NULL));
6996		}
6997#else /* !AHD_TARGET_MODE */
6998		match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
6999#endif /* AHD_TARGET_MODE */
7000	}
7001
7002	return match;
7003}
7004
7005void
7006ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
7007{
7008	int	target;
7009	char	channel;
7010	int	lun;
7011
7012	target = SCB_GET_TARGET(ahd, scb);
7013	lun = SCB_GET_LUN(scb);
7014	channel = SCB_GET_CHANNEL(ahd, scb);
7015
7016	ahd_search_qinfifo(ahd, target, channel, lun,
7017			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
7018			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
7019
7020	ahd_platform_freeze_devq(ahd, scb);
7021}
7022
7023void
7024ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
7025{
7026	struct scb	*prev_scb;
7027	ahd_mode_state	 saved_modes;
7028
7029	saved_modes = ahd_save_modes(ahd);
7030	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7031	prev_scb = NULL;
7032	if (ahd_qinfifo_count(ahd) != 0) {
7033		u_int prev_tag;
7034		u_int prev_pos;
7035
7036		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
7037		prev_tag = ahd->qinfifo[prev_pos];
7038		prev_scb = ahd_lookup_scb(ahd, prev_tag);
7039	}
7040	ahd_qinfifo_requeue(ahd, prev_scb, scb);
7041	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
7042	ahd_restore_modes(ahd, saved_modes);
7043}
7044
7045static void
7046ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
7047		    struct scb *scb)
7048{
7049	if (prev_scb == NULL) {
7050		uint32_t busaddr;
7051
7052		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
7053		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
7054		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
7055		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
7056		ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);
7057	} else {
7058		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
7059		ahd_sync_scb(ahd, prev_scb,
7060			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
7061	}
7062	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
7063	ahd->qinfifonext++;
7064	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
7065	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
7066}
7067
7068static int
7069ahd_qinfifo_count(struct ahd_softc *ahd)
7070{
7071	u_int qinpos;
7072	u_int wrap_qinpos;
7073	u_int wrap_qinfifonext;
7074
7075	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
7076	qinpos = ahd_get_snscb_qoff(ahd);
7077	wrap_qinpos = AHD_QIN_WRAP(qinpos);
7078	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);
7079	if (wrap_qinfifonext >= wrap_qinpos)
7080		return (wrap_qinfifonext - wrap_qinpos);
7081	else
7082		return (wrap_qinfifonext
7083		      + NUM_ELEMENTS(ahd->qinfifo) - wrap_qinpos);
7084}
7085
7086void
7087ahd_reset_cmds_pending(struct ahd_softc *ahd)
7088{
7089	struct		scb *scb;
7090	ahd_mode_state	saved_modes;
7091	u_int		pending_cmds;
7092
7093	saved_modes = ahd_save_modes(ahd);
7094	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
7095
7096	/*
7097	 * Don't count any commands as outstanding that the
7098	 * sequencer has already marked for completion.
7099	 */
7100	ahd_flush_qoutfifo(ahd);
7101
7102	pending_cmds = 0;
7103	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
7104		pending_cmds++;
7105	}
7106	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
7107	ahd_restore_modes(ahd, saved_modes);
7108	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
7109}
7110
/*
 * Walk the qinfifo and the waiting-for-selection TID lists, applying
 * "action" (SEARCH_COUNT, SEARCH_PRINT, SEARCH_REMOVE, or
 * SEARCH_COMPLETE with "status") to every SCB matching the
 * target/channel/lun/tag/role description.  Returns the number of
 * matching SCBs.  Callers are expected to have paused the sequencer.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb	*scb;
	struct scb	*prev_scb;
	ahd_mode_state	 saved_modes;
	u_int		 qinstart;
	u_int		 qinpos;
	u_int		 qintail;
	u_int		 tid_next;
	u_int		 tid_prev;
	u_int		 scbid;
	u_int		 savedscbptr;
	uint32_t	 busaddr;
	int		 found;
	int		 targets;
	int		 pending_cmds;
	int		 qincount;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this DMA if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	    == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		/* Busy-wait for the DMA engine to acknowledge the stop. */
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	/*
	 * NOTE(review): pending_cmds and qincount are computed here but
	 * never used in this function — apparently leftover debug or
	 * bookkeeping state.  Confirm before removing.
	 */
	pending_cmds = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		pending_cmds++;
	}
	qincount = ahd_qinfifo_count(ahd);

	if (action == SEARCH_PRINT) {
		printf("qinstart = 0x%x qinfifonext = 0x%x\n",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 0, busaddr & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 1, (busaddr >> 8) & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 2, (busaddr >> 16) & 0xFF);
	ahd_outb(ahd, NEXT_QUEUED_SCB_ADDR + 3, (busaddr >> 24) & 0xFF);

	/* Rebuild the qinfifo, acting on each matching entry. */
	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;
				cam_status cstat;

				/* Only override a still-in-progress status. */
				ostat = ahd_get_scsi_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahd_set_scsi_status(scb, status);
				cstat = ahd_get_transaction_status(scb);
				if (cstat != CAM_REQ_CMP)
					ahd_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in qinfifo\n");
				if ((cam_status)scb->xs->error != CAM_REQ_CMP)
					printf("SEARCH_COMPLETE(0x%x):"
					       " ostat 0x%x, cstat 0x%x, "
					       "xs_error 0x%x\n",
					       SCB_GET_TAG(scb), ostat, cstat,
					       scb->xs->error);
				ahd_done(ahd, scb);

				/* FALLTHROUGH */
			}
			case SEARCH_REMOVE:
				/* Simply drop the entry from the new queue. */
				break;
			case SEARCH_PRINT:
				printf(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching entries are always re-queued. */
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printf("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;

		/*
		 * We limit based on the number of SCBs since
		 * MK_MESSAGE SCBs are not in the per-tid lists.
		 */
		targets++;
		if (targets > AHD_SCB_MAX) {
			panic("TID LIST LOOP");
		}
		/* An out-of-range scbid indicates list corruption. */
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
			break;
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		/* Skip TID lists for targets that cannot match. */
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printf("       %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head,
					     SCB_GET_TARGET(ahd, scb));
		/* Re-link the TID list if its head changed. */
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printf(")\n");
	}
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}
7297
/*
 * Apply "action" to every SCB in the single waiting-for-selection
 * list headed by *list_head (belonging to target id "tid") that
 * matches the target/channel/lun/role description.  Updates
 * *list_head when the head entry is removed.  Returns the number
 * of matching SCBs.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head, u_int tid)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	next;
	u_int	prev;
	int	found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		/* An out-of-range scbid indicates list corruption. */
		if (scbid >= ahd->scb_data.numscbs) {
			printf("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printf("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
		{
			cam_status ostat;
			cam_status cstat;

			/* Only override a still-in-progress status. */
			ostat = ahd_get_scsi_status(scb);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_scsi_status(scb, status);
			cstat = ahd_get_transaction_status(scb);
			if (cstat != CAM_REQ_CMP)
				ahd_freeze_scb(scb);
			if ((scb->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB in Waiting List\n");
			ahd_done(ahd, scb);
			/* FALLTHROUGH */
		}
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			/* Removing the head entry moves the list head. */
			if (prev == SCB_LIST_NULL)
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printf("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	/*
	 * Completed/removed SCBs are no longer outstanding;
	 * keep the sequencer's pending-command count in sync.
	 */
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING, ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}
7371
/*
 * Re-link the waiting-for-selection TID list after a per-target
 * list has been modified.  tid_prev/tid_next bracket the position
 * of tid_cur; a NULL tid_cur removes that position from the list
 * entirely, otherwise tid_cur is spliced between its neighbors.
 * Updates WAITING_TID_HEAD/TAIL as needed.  Clobbers SCBPTR.
 */
static void
ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
		    u_int tid_cur, u_int tid_next)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);

	if (SCBID_IS_NULL(tid_cur)) {

		/* Bypass current TID list */
		if (SCBID_IS_NULL(tid_prev)) {
			/* Removing the list head. */
			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_next);
		}
		/* If we removed the tail, back the tail pointer up. */
		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
	} else {

		/* Stitch through tid_cur */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_cur);
		}
		ahd_set_scbptr(ahd, tid_cur);
		ahd_outw(ahd, SCB_NEXT2, tid_next);

		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
	}
}
7405
7406/*
7407 * Manipulate the waiting for selection list and return the
7408 * scb that follows the one that we remove.
7409 */
/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 *
 * "prev" and "next" are the removed SCB's neighbors in the
 * per-target list for "tid" (SCB_LIST_NULL when none).
 * Clobbers SCBPTR.
 */
static u_int
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
	     u_int prev, u_int next, u_int tid)
{
	u_int tail_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	/* Unlink: point the predecessor past the removed SCB. */
	if (!SCBID_IS_NULL(prev)) {
		ahd_set_scbptr(ahd, prev);
		ahd_outw(ahd, SCB_NEXT, next);
	}

	/*
	 * SCBs that had MK_MESSAGE set in them will not
	 * be queued to the per-target lists, so don't
	 * blindly clear the tail pointer.
	 */
	tail_offset = WAITING_SCB_TAILS + (2 * tid);
	if (SCBID_IS_NULL(next)
	 && ahd_inw(ahd, tail_offset) == scbid)
		ahd_outw(ahd, tail_offset, prev);
	ahd_add_scb_to_free_list(ahd, scbid);
	return (next);
}
7434
7435/*
7436 * Add the SCB as selected by SCBPTR onto the on chip list of
7437 * free hardware SCBs.  This list is empty/unused if we are not
7438 * performing SCB paging.
7439 */
/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 *
 * NOTE(review): the body is currently compiled out; this routine
 * is a no-op pending a replacement "free" designation mechanism.
 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
#ifdef notdef
/* XXX Need some other mechanism to designate "free". */
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
#endif
}
7452
7453/******************************** Error Handling ******************************/
7454/*
7455 * Abort all SCBs that match the given description (target/channel/lun/tag),
7456 * setting their status to the passed in status if the status has not already
7457 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
7458 * is paused before it is called.
7459 */
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.  Returns the number of SCBs aborted.
 */
int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct		scb *scbp;
	struct		scb *scbp_next;
	u_int		i, j;
	u_int		maxtarget;
	u_int		minlun;
	u_int		maxlun;
	int		found;
	ahd_mode_state	saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* First flush matching commands out of the qinfifo. */
	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		/* 'B'-channel targets occupy the upper half of the table. */
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	/* Restrict the lun range unless a wildcard was given. */
	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		/* Out-of-range lun: empty range, nothing to scan. */
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (;i < maxtarget; i++) {
			for (j = minlun;j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Advance first: ahd_done() unlinks scbp from this list. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			/* Only override a still-in-progress status. */
			ostat = ahd_get_scsi_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_scsi_status(scbp, status);
			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahd_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	/* CMDS_PENDING is now stale; request a recount. */
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}
7555
/*
 * Assert SCSI bus reset on the current bus.  Temporarily masks the
 * SCSIRST interrupt while we drive the reset, pulses SCSIRSTO for
 * AHD_BUSRESET_DELAY, and works around chips that need a full chip
 * reset after a self-initiated bus reset (Razor #474).
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	/* Don't interrupt ourselves on the reset we are about to cause. */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
		 */
		ahd_reset(ahd, /*reinit*/TRUE);
		ahd_intr_enable(ahd, /*enable*/TRUE);
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	}

	ahd_clear_intstat(ahd);
}
7585
/*
 * Reset the named channel: quiesce the DMA FIFOs, optionally drive
 * a SCSI bus reset, abort every pending transaction on the bus,
 * revert all targets to async/narrow, notify target-mode peripherals
 * (if compiled in) and the XPT, then restart the sequencer.  Returns
 * the number of transactions aborted.
 */
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
	struct	ahd_devinfo devinfo;
	u_int	initiator;
	u_int	target;
	u_int	max_scsiid;
	int	found;
	u_int	fifo;
	u_int	next_fifo;


	ahd->pending_device = NULL;

	ahd_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahd_pause(ahd);

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

#if AHD_TARGET_MODE
	/* NOTE(review): "#if" here vs "#ifdef" below — confirm intent. */
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
	}
#endif
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Disable selections so no automatic hardware
	 * functions will modify chip state.
	 */
	ahd_outb(ahd, SCSISEQ0, 0);
	ahd_outb(ahd, SCSISEQ1, 0);

	/*
	 * Safely shut down our DMA engines.  Always start with
	 * the FIFO that is not currently active (if any are
	 * actively connected).
	 */
	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	if (next_fifo > CURRFIFO_1)
		/* If disconnected, arbitrarily start with FIFO1. */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		/* Wait for the host DMA engine to acknowledge the stop. */
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			ahd_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	/*
	 * NOTE(review): ENBUSFREE appears twice in this mask; harmless
	 * (bitwise OR is idempotent), but a different enable bit may
	 * have been intended — confirm against the register definition.
	 */
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST|ENBUSFREE));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
	for (target = 0; target <= max_scsiid; target++) {

		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo dinfo;

			ahd_compile_devinfo(&dinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &dinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &dinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

#ifdef AHD_TARGET_MODE
	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;

	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif

	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET, NULL);
	ahd_restart(ahd);

	/*
	 * Freeze the SIMQ until our poller can determine that
	 * the bus reset has really gone away.  We set the initial
	 * timer to 0 to have the check performed as soon as possible
	 * from the timer context.
	 */
	if ((ahd->flags & AHD_RESET_POLL_ACTIVE) == 0) {
		ahd->flags |= AHD_RESET_POLL_ACTIVE;
		ahd_freeze_simq(ahd);
		ahd_timer_reset(&ahd->reset_timer, 0, ahd_reset_poll, ahd);
	}
	return (found);
}
7741
7742
#define AHD_RESET_POLL_US 1000
/*
 * Timer callback that polls for the end of a SCSI bus reset.
 * Re-arms itself every AHD_RESET_POLL_US while SCSIRSTI is still
 * asserted; once the reset line drops it re-enables the reset
 * interrupt and selection hardware, clears AHD_RESET_POLL_ACTIVE,
 * and releases the frozen SIMQ.  "arg" is the ahd_softc.
 */
static void
ahd_reset_poll(void *arg)
{
	struct	ahd_softc *ahd;
	u_int	scsiseq1;
	u_long	l;
	int	s;

	ahd_list_lock(&l);
	ahd = arg;
	if (ahd == NULL) {
		printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
		ahd_list_unlock(&l);
		return;
	}
	ahd_lock(ahd, &s);
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
	/* Reset still asserted?  Try again one poll period from now. */
	if ((ahd_inb(ahd, SSTAT1) & SCSIRSTI) != 0) {
		ahd_timer_reset(&ahd->reset_timer, AHD_RESET_POLL_US,
				ahd_reset_poll, ahd);
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		ahd_list_unlock(&l);
		return;
	}

	/* Reset is now low.  Complete chip reinitialization. */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq1 & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_unpause(ahd);
	ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
	ahd_unlock(ahd, &s);
	ahd_release_simq(ahd);
	ahd_list_unlock(&l);
}
7783
7784/**************************** Statistics Processing ***************************/
/*
 * Periodic statistics timer.  Adjusts interrupt coalescing based on
 * the recent command completion rate (enable above
 * int_coalescing_threshold, disable below int_coalescing_stop_threshold),
 * rotates the completion-count bucket ring, and re-arms itself every
 * AHD_STAT_UPDATE_US.  "arg" is the ahd_softc.
 */
static void
ahd_stat_timer(void *arg)
{
	struct	ahd_softc *ahd;
	u_long	l;
	int	s;
	int	enint_coal;

	ahd_list_lock(&l);
	ahd = arg;
	if (ahd == NULL) {
		printf("ahd_stat_timer: Instance %p no longer exists\n", arg);
		ahd_list_unlock(&l);
		return;
	}
	ahd_lock(ahd, &s);

	/* Hysteresis: two thresholds keep coalescing from flapping. */
	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
		enint_coal |= ENINT_COALESCE;
	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
		enint_coal &= ~ENINT_COALESCE;

	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
		ahd_enable_coalescing(ahd, enint_coal);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
			printf("%s: Interrupt coalescing "
			       "now %sabled. Cmds %d\n",
			       ahd_name(ahd),
			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
			       ahd->cmdcmplt_total);
#endif
	}

	/* Retire the oldest bucket and start counting into it afresh. */
	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	ahd_unlock(ahd, &s);
	ahd_list_unlock(&l);
}
7828
7829/****************************** Status Processing *****************************/
7830void
7831ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
7832{
7833	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
7834		ahd_handle_scsi_status(ahd, scb);
7835	} else {
7836		ahd_calc_residual(ahd, scb);
7837		ahd_done(ahd, scb);
7838	}
7839}
7840
7841void
7842ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
7843{
7844	struct hardware_scb *hscb;
7845	u_int  qfreeze_cnt;
7846
7847	/*
7848	 * The sequencer freezes its select-out queue
7849	 * anytime a SCSI status error occurs.  We must
7850	 * handle the error and decrement the QFREEZE count
7851	 * to allow the sequencer to continue.
7852	 */
7853	hscb = scb->hscb;
7854
7855	/* Freeze the queue until the client sees the error. */
7856	ahd_freeze_devq(ahd, scb);
7857	ahd_freeze_scb(scb);
7858	qfreeze_cnt = ahd_inw(ahd, QFREEZE_COUNT);
7859	if (qfreeze_cnt == 0) {
7860		printf("%s: Bad status with 0 qfreeze count!\n", ahd_name(ahd));
7861	} else {
7862		qfreeze_cnt--;
7863		ahd_outw(ahd, QFREEZE_COUNT, qfreeze_cnt);
7864	}
7865	if (qfreeze_cnt == 0)
7866		ahd_outb(ahd, SEQ_FLAGS2,
7867			 ahd_inb(ahd, SEQ_FLAGS2) & ~SELECTOUT_QFROZEN);
7868
7869	/* Don't want to clobber the original sense code */
7870	if ((scb->flags & SCB_SENSE) != 0) {
7871		/*
7872		 * Clear the SCB_SENSE Flag and perform
7873		 * a normal command completion.
7874		 */
7875		scb->flags &= ~SCB_SENSE;
7876		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
7877		ahd_done(ahd, scb);
7878		return;
7879	}
7880	ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
7881	ahd_set_xfer_status(scb, hscb->shared_data.istatus.scsi_status);
7882	switch (hscb->shared_data.istatus.scsi_status) {
7883	case STATUS_PKT_SENSE:
7884	{
7885		struct scsi_status_iu_header *siu;
7886
7887		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
7888		siu = (struct scsi_status_iu_header *)scb->sense_data;
7889		ahd_set_scsi_status(scb, siu->status);
7890#ifdef AHD_DEBUG
7891		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
7892			ahd_print_path(ahd, scb);
7893			printf("SCB 0x%x Received PKT Status of 0x%x\n",
7894			       SCB_GET_TAG(scb), siu->status);
7895		}
7896#endif
7897		if ((siu->flags & SIU_RSPVALID) != 0) {
7898			scsipi_printaddr(scb->xs->xs_periph);
7899			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
7900				printf("Unable to parse pkt_failures\n");
7901			} else {
7902
7903				switch (SIU_PKTFAIL_CODE(siu)) {
7904				case SIU_PFC_NONE:
7905					printf("No packet failure found\n");
7906					break;
7907				case SIU_PFC_CIU_FIELDS_INVALID:
7908					printf("Invalid Command IU Field\n");
7909					break;
7910				case SIU_PFC_TMF_NOT_SUPPORTED:
7911					printf("TMF not supportd\n");
7912					break;
7913				case SIU_PFC_TMF_FAILED:
7914					printf("TMF failed\n");
7915					break;
7916				case SIU_PFC_INVALID_TYPE_CODE:
7917					printf("Invalid L_Q Type code\n");
7918					break;
7919				case SIU_PFC_ILLEGAL_REQUEST:
7920					printf("Illegal request\n");
7921				default:
7922					break;
7923				}
7924			}
7925			if (siu->status == SCSI_STATUS_OK)
7926				ahd_set_transaction_status(scb,
7927				    CAM_REQ_CMP_ERR);
7928		}
7929		if ((siu->flags & SIU_SNSVALID) != 0) {
7930			scb->flags |= SCB_PKT_SENSE;
7931#ifdef AHD_DEBUG
7932			if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
7933				printf("Sense data available (%d)\n",
7934				       siu->sense_length[0]);
7935				printf("SK 0x%x ASC 0x%x ASCQ 0x%x\n",
7936				       ((uint8_t)scb->sense_data[
7937				        SIU_SENSE_OFFSET(siu)+2]) & 0x0F,
7938				       ((uint8_t)scb->sense_data[
7939				        SIU_SENSE_OFFSET(siu)+12]),
7940				       ((uint8_t)scb->sense_data[
7941				        SIU_SENSE_OFFSET(siu)+13]));
7942			}
7943#endif
7944		}
7945		ahd_done(ahd, scb);
7946		break;
7947	}
7948	case SCSI_STATUS_CMD_TERMINATED:
7949	case SCSI_STATUS_CHECK_COND:
7950	{
7951		struct ahd_devinfo devinfo;
7952		struct ahd_dma_seg *sg;
7953		struct scsi_request_sense *sc;
7954		struct ahd_initiator_tinfo *targ_info;
7955		struct ahd_tmode_tstate *tstate;
7956		struct ahd_transinfo *tinfo;
7957#ifdef AHD_DEBUG
7958		if (ahd_debug & AHD_SHOW_SENSE) {
7959			ahd_print_path(ahd, scb);
7960			printf("SCB %d: requests Check Status\n",
7961			       SCB_GET_TAG(scb));
7962		}
7963#endif
7964
7965		if (ahd_perform_autosense(scb) == 0)
7966			break;
7967
7968		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
7969				    SCB_GET_TARGET(ahd, scb),
7970				    SCB_GET_LUN(scb),
7971				    SCB_GET_CHANNEL(ahd, scb),
7972				    ROLE_INITIATOR);
7973		targ_info = ahd_fetch_transinfo(ahd,
7974						devinfo.channel,
7975						devinfo.our_scsiid,
7976						devinfo.target,
7977						&tstate);
7978		tinfo = &targ_info->curr;
7979		sg = scb->sg_list;
7980		sc = (struct scsi_request_sense *)hscb->shared_data.idata.cdb;
7981		/*
7982		 * Save off the residual if there is one.
7983		 */
7984		ahd_update_residual(ahd, scb);
7985#ifdef AHD_DEBUG
7986		if (ahd_debug & AHD_SHOW_SENSE) {
7987			ahd_print_path(ahd, scb);
7988			printf("Sending Sense\n");
7989		}
7990#endif
7991		scb->sg_count = 0;
7992		sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
7993				  ahd_get_sense_bufsize(ahd, scb),
7994				  /*last*/TRUE);
7995		memset(sc, 0, sizeof(*sc));
7996		sc->opcode = SCSI_REQUEST_SENSE;
7997		sc->length = ahd_get_sense_bufsize(ahd, scb);
7998
7999		/*
8000		 * We can't allow the target to disconnect.
8001		 * This will be an untagged transaction and
8002		 * having the target disconnect will make this
8003		 * transaction indistinguishable from outstanding
8004		 * tagged transactions.
8005		 */
8006		hscb->control = 0;
8007
8008		/*
8009		 * This request sense could be because the
8010		 * the device lost power or in some other
8011		 * way has lost our transfer negotiations.
8012		 * Renegotiate if appropriate.  Unit attention
8013		 * errors will be reported before any data
8014		 * phases occur.
8015		 */
8016		if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
8017			ahd_update_neg_request(ahd, &devinfo,
8018					       tstate, targ_info,
8019					       AHD_NEG_IF_NON_ASYNC);
8020		}
8021		if (tstate->auto_negotiate & devinfo.target_mask) {
8022			hscb->control |= MK_MESSAGE;
8023			scb->flags &=
8024			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
8025			scb->flags |= SCB_AUTO_NEGOTIATE;
8026		}
8027		hscb->cdb_len = sizeof(*sc);
8028		ahd_setup_data_scb(ahd, scb);
8029		scb->flags |= SCB_SENSE;
8030		ahd_queue_scb(ahd, scb);
8031		/*
8032		 * Ensure we have enough time to actually
8033		 * retrieve the sense.
8034		 */
8035		ahd_scb_timer_reset(scb, 5 * 1000000);
8036		break;
8037	}
8038	case SCSI_STATUS_OK:
8039		printf("%s: Interrupted for status of 0? (SCB 0x%x)\n",
8040		       ahd_name(ahd), SCB_GET_TAG(scb));
8041		/* FALLTHROUGH */
8042	default:
8043		ahd_done(ahd, scb);
8044		break;
8045	}
8046}
8047
8048/*
8049 * Calculate the residual for a just completed SCB.
8050 */
void
ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct initiator_status *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_STATUS_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	sgptr = ahd_le32toh(hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) == 0)
		/* Case 1 */
		return;
	sgptr &= ~SG_STATUS_VALID;

	if ((sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	/*
	 * Residual fields are the same in both
	 * target and initiator status packets,
	 * so we can always use the initiator fields
	 * regardless of the role for this SCB.
	 */
	spkt = &hscb->shared_data.istatus;
	resid_sgptr = ahd_le32toh(spkt->residual_sgptr);
	if ((sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahd_get_transfer_length(scb);
	} else if ((resid_sgptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) {
		/* Firmware flagged an overrun; fail the transaction. */
		ahd_print_path(ahd, scb);
		printf("data overrun detected Tag == 0x%x.\n",
		       SCB_GET_TAG(scb));
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		return;
	} else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
		/* NOTREACHED */
	} else {
		struct ahd_dma_seg *sg;

		/*
		 * Case 5: Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
		sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) {
			sg++;
			resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
		}
	}

	/*
	 * Only report a data residual for non-sense transfers;
	 * sense residual reporting is disabled (see the
	 * commented-out call below).
	 */
	if ((scb->flags & SCB_SENSE) == 0)
		ahd_set_residual(scb, resid);
		/*else
		  ahd_set_sense_residual(scb, resid);*/

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0) {
		ahd_print_path(ahd, scb);
		printf("Handled %sResidual of %d bytes\n",
		       (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
	}
#endif
}
8149
8150/******************************* Target Mode **********************************/
8151#ifdef AHD_TARGET_MODE
8152/*
8153 * Add a target mode event to this lun's queue
8154 */
static void
ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahd_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently queued in the circular buffer. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
		 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) {
		/*
		 * Buffer is full: drop the oldest event to make room,
		 * releasing the devq freeze count it was holding.
		 */
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	/* Append the new event at the write index and advance it. */
	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}
8201
8202/*
8203 * Send any target mode events queued up waiting
8204 * for immediate notify resources.
8205 */
void
ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/*
	 * Drain queued events for as long as we have both an event
	 * to report and an immediate notify CCB to report it with.
	 */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahd_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			/* Everything else is delivered as a message event. */
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		/* Consume the event from the circular buffer. */
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
8237#endif
8238
8239/******************** Sequencer Program Patching/Download *********************/
8240
8241#ifdef AHD_DUMP_SEQ
8242void
8243ahd_dumpseq(struct ahd_softc* ahd)
8244{
8245	int i;
8246	int max_prog;
8247
8248	max_prog = 2048;
8249
8250	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
8251	ahd_outb(ahd, PRGMCNT, 0);
8252	ahd_outb(ahd, PRGMCNT+1, 0);
8253	for (i = 0; i < max_prog; i++) {
8254		uint8_t ins_bytes[4];
8255
8256		ahd_insb(ahd, SEQRAM, ins_bytes, 4);
8257		printf("0x%08x\n", ins_bytes[0] << 24
8258				 | ins_bytes[1] << 16
8259				 | ins_bytes[2] << 8
8260				 | ins_bytes[3]);
8261	}
8262}
8263#endif
8264
static void
ahd_loadseq(struct ahd_softc *ahd)
{
	struct	cs cs_table[num_critical_sections];
	u_int	begin_set[num_critical_sections];
	u_int	end_set[num_critical_sections];
	struct	patch *cur_patch;
	u_int	cs_count;
	u_int	cur_cs;
	u_int	i;
	int	downloaded;
	u_int	skip_addr;
	u_int	sg_prefetch_cnt;
	u_int	sg_prefetch_cnt_limit;
	u_int	sg_prefetch_align;
	u_int	sg_size;
	uint8_t	download_consts[DOWNLOAD_CONST_COUNT];

	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahd_name(ahd));

	/* Keep this routine in sync with the firmware constant table. */
#if DOWNLOAD_CONST_COUNT != 7
#error "Download Const Mismatch"
#endif
	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
	 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/*
	 * Setup downloadable constant table.
	 *
	 * The computation for the S/G prefetch variables is
	 * a bit complicated.  We would like to always fetch
	 * in terms of cachelined sized increments.  However,
	 * if the cacheline is not an even multiple of the
	 * SG element size or is larger than our SG RAM, using
	 * just the cache size might leave us with only a portion
	 * of an SG element at the tail of a prefetch.  If the
	 * cacheline is larger than our S/G prefetch buffer less
	 * the size of an SG element, we may round down to a cacheline
	 * that doesn't contain any or all of the S/G of interest
	 * within the bounds of our S/G ram.  Provide variables to
	 * the sequencer that will allow it to handle these edge
	 * cases.
	 */
	/* Start by aligning to the nearest cacheline. */
	sg_prefetch_align = ahd->pci_cachesize;
	if (sg_prefetch_align == 0)
		sg_prefetch_align = 8;
	/* Round down to the nearest power of 2. */
	while (powerof2(sg_prefetch_align) == 0)
		sg_prefetch_align--;
	/*
	 * If the cacheline boundary is greater than half our prefetch RAM
	 * we risk not being able to fetch even a single complete S/G
	 * segment if we align to that boundary.
	 */
	if (sg_prefetch_align > CCSGADDR_MAX/2)
		sg_prefetch_align = CCSGADDR_MAX/2;
	/* Start by fetching a single cacheline. */
	sg_prefetch_cnt = sg_prefetch_align;
	/*
	 * Increment the prefetch count by cachelines until
	 * at least one S/G element will fit.
	 */
	sg_size = sizeof(struct ahd_dma_seg);
	if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
		sg_size = sizeof(struct ahd_dma64_seg);
	while (sg_prefetch_cnt < sg_size)
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * If the cacheline is not an even multiple of
	 * the S/G size, we may only get a partial S/G when
	 * we align. Add a cacheline if this is the case.
	 */
	if ((sg_prefetch_align % sg_size) != 0
	 && (sg_prefetch_cnt < CCSGADDR_MAX))
		sg_prefetch_cnt += sg_prefetch_align;
	/*
	 * Lastly, compute a value that the sequencer can use
	 * to determine if the remainder of the CCSGRAM buffer
	 * has a full S/G element in it.
	 * NOTE(review): stored negated so the sequencer can test
	 * with a simple 8-bit add/carry -- confirm against firmware.
	 */
	sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
	download_consts[SG_SIZEOF] = sg_size;
	download_consts[PKT_OVERRUN_BUFOFFSET] =
		(ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
	download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
	/* Enter sequencer RAM load mode with the program counter at 0. */
	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahd_outb(ahd, PRGMCNT, 0);
	ahd_outb(ahd, PRGMCNT+1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				/* Close out a critical section in progress. */
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			/* Open a critical section at the downloaded address. */
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahd_download_instr(ahd, i, download_consts);
		downloaded++;
	}

	/* Preserve the translated critical-section table for run time. */
	ahd->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahd->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahd->critical_sections == NULL)
			panic("ahd_loadseq: Could not malloc");
		memcpy(ahd->critical_sections, cs_table, cs_count);
	}
	/* Leave load mode; the sequencer may now execute the program. */
	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);

	if (bootverbose) {
		printf(" %d instructions downloaded\n", downloaded);
		printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
		       ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
	}
}
8420
8421static int
8422ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
8423		u_int start_instr, u_int *skip_addr)
8424{
8425	struct	patch *cur_patch;
8426	struct	patch *last_patch;
8427	u_int	num_patches;
8428
8429	num_patches = sizeof(patches)/sizeof(struct patch);
8430	last_patch = &patches[num_patches];
8431	cur_patch = *start_patch;
8432
8433	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
8434
8435		if (cur_patch->patch_func(ahd) == 0) {
8436
8437			/* Start rejecting code */
8438			*skip_addr = start_instr + cur_patch->skip_instr;
8439			cur_patch += cur_patch->skip_patch;
8440		} else {
8441			/* Accepted this patch.  Advance to the next
8442			 * one and wait for our intruction pointer to
8443			 * hit this point.
8444			 */
8445			cur_patch++;
8446		}
8447	}
8448
8449	*start_patch = cur_patch;
8450	if (start_instr < *skip_addr)
8451		/* Still skipping */
8452		return (0);
8453
8454	return (1);
8455}
8456
8457static u_int
8458ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
8459{
8460	struct patch *cur_patch;
8461	int address_offset;
8462	u_int skip_addr;
8463	u_int i;
8464
8465	address_offset = 0;
8466	cur_patch = patches;
8467	skip_addr = 0;
8468
8469	for (i = 0; i < address;) {
8470
8471		ahd_check_patch(ahd, &cur_patch, i, &skip_addr);
8472
8473		if (skip_addr > i) {
8474			int end_addr;
8475
8476			end_addr = MIN(address, skip_addr);
8477			address_offset += end_addr - i;
8478			i = skip_addr;
8479		} else {
8480			i++;
8481		}
8482	}
8483	return (address - address_offset);
8484}
8485
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
	union	ins_formats instr;
	struct	ins_format1 *fmt1_ins;
	struct	ins_format3 *fmt3_ins;
	u_int	opcode;

	/*
	 * The firmware is always compiled into a little endian format.
	 */
	instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		/*
		 * Branch instruction: translate the source-program
		 * address into its downloaded location, accounting
		 * for instructions removed by rejected patches.
		 */
		fmt3_ins = &instr.format3;
		fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/*
		 * A set parity bit in the source image marks an
		 * immediate operand that indexes the download
		 * constant table; substitute the real value.
		 */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
		fmt1_ins->parity = 0;
		/* FALLTHROUGH */
	case AIC_OP_ROL:
	{
		int i, count;

		/*
		 * Calculate odd parity for the instruction.
		 * NOTE(review): only bits 0-30 are summed; presumably
		 * the excluded top bit is the parity position itself --
		 * confirm against the ins_formats definition.
		 */
		for (i = 0, count = 0; i < 31; i++) {
			uint32_t mask;

			mask = 0x01 << i;
			if ((instr.integer & mask) != 0)
				count++;
		}
		if ((count & 0x01) == 0)
			instr.format1.parity = 1;

		/* The sequencer is a little endian CPU */
		instr.integer = ahd_htole32(instr.integer);
		ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
		break;
	}
	default:
		panic("Unknown opcode encountered in seq program");
		break;
	}
}
8554
static int
ahd_probe_stack_size(struct ahd_softc *ahd)
{
	int last_probe;

	/*
	 * Determine the depth of the sequencer's hardware stack by
	 * pushing progressively longer patterns and verifying that
	 * each entry can be read back intact.  The first length that
	 * fails to verify reveals the stack size.
	 */
	last_probe = 0;
	while (1) {
		int i;

		/*
		 * We avoid using 0 as a pattern to avoid
		 * confusion if the stack implementation
		 * "back-fills" with zeros when "popping"
		 * entries.
		 */
		for (i = 1; i <= last_probe+1; i++) {
			/* Each stack entry is pushed as two bytes, LSB first. */
			ahd_outb(ahd, STACK, i & 0xFF);
			ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
		}

		/* Verify */
		for (i = last_probe+1; i > 0; i--) {
			u_int stack_entry;

			stack_entry = ahd_inb(ahd, STACK)
				    |(ahd_inb(ahd, STACK) << 8);
			if (stack_entry != i)
				goto sized;
		}
		last_probe++;
	}
sized:
	return (last_probe);
}
8589
8590void
8591ahd_dump_all_cards_state(void)
8592{
8593	struct ahd_softc *list_ahd;
8594
8595	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
8596		ahd_dump_card_state(list_ahd);
8597	}
8598}
8599
/*
 * Pretty-print a register value, decoding set bit-fields by name
 * using the supplied parse table.  Tracks the caller's output
 * column via *cur_column, starting a fresh line once wrap_point
 * is exceeded.  Returns the number of characters printed.
 * NOTE(review): the 0xFF termination mask assumes an 8-bit register;
 * the address parameter is unused here, presumably kept for
 * signature compatibility with generated callers.
 */
int
ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
		   const char *name, u_int address, u_int value,
		   u_int *cur_column, u_int wrap_point)
{
	int	printed;
	u_int	printed_mask;
	char    line[1024];

	line[0] = 0;

	/* Wrap to a new line if the previous output ran long. */
	if (cur_column != NULL && *cur_column >= wrap_point) {
		printf("\n");
		*cur_column = 0;
	}
	printed = snprintf(line, sizeof(line), "%s[0x%x]", name, value);
	if (table == NULL) {
		/* No parse table: emit just the raw hex value. */
		printed += snprintf(&line[printed], (sizeof line) - printed,
		    " ");
		printf("%s", line);
		if (cur_column != NULL)
			*cur_column += printed;
		return (printed);
	}
	/*
	 * Decode named bit-fields until all eight bits have been
	 * accounted for or no table entry matches the remaining bits.
	 */
	printed_mask = 0;
	while (printed_mask != 0xFF) {
		int entry;

		for (entry = 0; entry < num_entries; entry++) {
			if (((value & table[entry].mask)
			  != table[entry].value)
			 || ((printed_mask & table[entry].mask)
			  == table[entry].mask))
				continue;
			printed += snprintf(&line[printed],
			    (sizeof line) - printed, "%s%s",
				printed_mask == 0 ? ":(" : "|",
				table[entry].name);
			printed_mask |= table[entry].mask;

			break;
		}
		/* No entry matched this pass; nothing more to decode. */
		if (entry >= num_entries)
			break;
	}
	/* Close the field list, or emit a bare separator if none printed. */
	if (printed_mask != 0)
		printed += snprintf(&line[printed],
		    (sizeof line) - printed, ") ");
	else
		printed += snprintf(&line[printed],
		    (sizeof line) - printed, " ");
	if (cur_column != NULL)
		*cur_column += printed;
	printf("%s", line);

	return (printed);
}
8657
/*
 * Print a comprehensive snapshot of the controller's register,
 * queue, and SCB state for debugging.  Pauses the sequencer if it
 * is running and restores the original mode and pause state before
 * returning.
 */
void
ahd_dump_card_state(struct ahd_softc *ahd)
{
	struct scb	*scb;
	ahd_mode_state	 saved_modes;
	u_int		 dffstat;
	int		 paused;
	u_int		 scb_index;
	u_int		 saved_scb_index;
	u_int		 cur_col;
	int		 i;

	/* Pause the chip, remembering whether it was already paused. */
	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
	       "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
	       ahd_name(ahd),
	       ahd_inb(ahd, CURADDR) | (ahd_inb(ahd, CURADDR+1) << 8),
	       ahd_build_mode_state(ahd, ahd->saved_src_mode,
				    ahd->saved_dst_mode));
	if (paused)
		printf("Card was paused\n");

	if (ahd_check_cmdcmpltqueues(ahd))
		printf("Completions are pending\n");
	/*
	 * Mode independent registers.
	 */
	cur_col = 0;
	ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
	ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
	ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
	ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
	ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
	ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
	ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
	ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
	ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
	ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
	ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
	ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
	ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
	ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
	ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
	ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
	ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
	ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
	ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
	ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
	ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
	ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
	ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
	ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
	ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
	ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
	ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
	printf("\n");
	printf("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
	       "CURRSCB 0x%x NEXTSCB 0x%x\n",
	       ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
	       ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
	       ahd_inw(ahd, NEXTSCB));
	cur_col = 0;
	/* QINFIFO */
	ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
			   CAM_LUN_WILDCARD, SCB_LIST_NULL,
			   ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
	saved_scb_index = ahd_get_scbptr(ahd);
	/* SCBs queued to the hardware but not yet completed. */
	printf("Pending list:");
	i = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		if (i++ > AHD_SCB_MAX)
			break;
		/*cur_col =*/ printf("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
				     ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
				      &cur_col, 60);
		ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
				     &cur_col, 60);
	}
	printf("\nTotal %d\n", i);

	printf("Kernel Free SCB list: ");
	i = 0;
	TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
		struct scb *list_scb;

		/* Each free-list entry may chain collision SCBs. */
		list_scb = scb;
		do {
			printf("%d ", SCB_GET_TAG(list_scb));
			list_scb = LIST_NEXT(list_scb, collision_links);
		} while (list_scb && i++ < AHD_SCB_MAX);
	}

	LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
		if (i++ > AHD_SCB_MAX)
			break;
		printf("%d ", SCB_GET_TAG(scb));
	}
	printf("\n");

	printf("Sequencer Complete DMA-inprog list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");

	printf("Sequencer Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");


	printf("Sequencer DMA-Up and Complete list: ");
	scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
	i = 0;
	while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
		ahd_set_scbptr(ahd, scb_index);
		printf("%d ", scb_index);
		scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
	}
	printf("\n");
	ahd_set_scbptr(ahd, saved_scb_index);
	dffstat = ahd_inb(ahd, DFFSTAT);
	/* Dump per-FIFO (DFF0/DFF1) data path state. */
	for (i = 0; i < 2; i++) {
#ifdef AHD_DEBUG
		struct scb *fifo_scb;
#endif
		u_int	    fifo_scbptr;

		ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
		fifo_scbptr = ahd_get_scbptr(ahd);
		printf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
		       ahd_name(ahd), i,
		       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
		       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
		cur_col = 0;
		ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
		ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
		ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
		ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
		ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
					  &cur_col, 50);
		ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
		ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
		ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
		ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
		if (cur_col > 50) {
			printf("\n");
			cur_col = 0;
		}
		printf("\nSHADDR = 0x%x%x, SHCNT = 0x%x ",
				  ahd_inl(ahd, SHADDR+4),
				  ahd_inl(ahd, SHADDR),
				  (ahd_inb(ahd, SHCNT)
				| (ahd_inb(ahd, SHCNT + 1) << 8)
				| (ahd_inb(ahd, SHCNT + 2) << 16)));
		printf("HADDR = 0x%x%x, HCNT = 0x%x \n",
				  ahd_inl(ahd, HADDR+4),
				  ahd_inl(ahd, HADDR),
				  (ahd_inb(ahd, HCNT)
				| (ahd_inb(ahd, HCNT + 1) << 8)
				| (ahd_inb(ahd, HCNT + 2) << 16)));
		ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SG) != 0) {
			fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
			if (fifo_scb != NULL)
				ahd_dump_sglist(fifo_scb);
		}
#endif
	}
	printf("\nLQIN: ");
	for (i = 0; i < 20; i++)
		printf("0x%x ", ahd_inb(ahd, LQIN + i));
	printf("\n");
	ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
	printf("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
	       ahd_inb(ahd, OPTIONMODE));
	printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
	       ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
	       ahd_inb(ahd, MAXCMDCNT));
	ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
	printf("\n");
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	cur_col = 0;
	ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
	printf("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
	       ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
	       ahd_inw(ahd, DINDEX));
	printf("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
	       ahd_name(ahd), ahd_get_scbptr(ahd),
	       ahd_inw_scbram(ahd, SCB_NEXT),
	       ahd_inw_scbram(ahd, SCB_NEXT2));
	printf("CDB %x %x %x %x %x %x\n",
	       ahd_inb_scbram(ahd, SCB_CDB_STORE),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
	       ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
	/*
	 * Reading STACK pops entries, so capture the contents and
	 * then push them back to leave the stack undisturbed.
	 */
	printf("STACK:");
	for (i = 0; i < ahd->stack_size; i++) {
		ahd->saved_stack[i] =
		    ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
		printf(" 0x%x", ahd->saved_stack[i]);
	}
	for (i = ahd->stack_size-1; i >= 0; i--) {
		ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
		ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
	}
	printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
	ahd_platform_dump_card_state(ahd);
	ahd_restore_modes(ahd, saved_modes);
	if (paused == 0)
		ahd_unpause(ahd);
}
8892
8893void
8894ahd_dump_scbs(struct ahd_softc *ahd)
8895{
8896	ahd_mode_state saved_modes;
8897	u_int	       saved_scb_index;
8898	int	       i;
8899
8900	saved_modes = ahd_save_modes(ahd);
8901	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
8902	saved_scb_index = ahd_get_scbptr(ahd);
8903	for (i = 0; i < AHD_SCB_MAX; i++) {
8904		ahd_set_scbptr(ahd, i);
8905		printf("%3d", i);
8906		printf("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
8907		       ahd_inb_scbram(ahd, SCB_CONTROL),
8908		       ahd_inb_scbram(ahd, SCB_SCSIID),
8909		       ahd_inw_scbram(ahd, SCB_NEXT),
8910		       ahd_inw_scbram(ahd, SCB_NEXT2),
8911		       ahd_inl_scbram(ahd, SCB_SGPTR),
8912		       ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
8913	}
8914	printf("\n");
8915	ahd_set_scbptr(ahd, saved_scb_index);
8916	ahd_restore_modes(ahd, saved_modes);
8917}
8918
8919/**************************** Flexport Logic **********************************/
8920/*
8921 * Read count 16bit words from 16bit word address start_addr from the
8922 * SEEPROM attached to the controller, into tbuf, using the controller's
8923 * SEEPROM reading state machine.  Optionally treat the data as a byte
8924 * stream in terms of byte order.
8925 */
int
ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *tbuf,
		 u_int start_addr, u_int count, int bytestream)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;

	/*
	 * If we never make it through the loop even once,
	 * we were passed invalid arguments.
	 */
	error = EINVAL;
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {

		/* Kick off a read of this word and wait for it to finish. */
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);

		error = ahd_wait_seeprom(ahd);
		if (error) {
			printf("%s: ahd_wait_seeprom timed out\n",
			    ahd_name(ahd));
			break;
		}
		if (bytestream != 0) {
			uint8_t *bytestream_ptr;

			/* Preserve on-chip byte order for byte streams. */
			bytestream_ptr = (uint8_t *)tbuf;
			*bytestream_ptr++ = ahd_inb(ahd, SEEDAT);
			*bytestream_ptr = ahd_inb(ahd, SEEDAT+1);
		} else {
			/*
			 * ahd_inw() already handles machine byte order.
			 */
			*tbuf = ahd_inw(ahd, SEEDAT);
		}
		tbuf++;
	}
	return (error);
}
8968
8969/*
 * Write count 16bit words from tbuf, into the SEEPROM attached to the
8971 * controller starting at 16bit word address start_addr, using the
8972 * controller's SEEPROM writing state machine.
8973 */
int
ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *tbuf,
		  u_int start_addr, u_int count)
{
	u_int cur_addr;
	u_int end_addr;
	int   error;
	int   retval;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	error = ENOENT;	/* NOTE(review): dead store; overwritten below */

	/* Place the chip into write-enable mode */
	ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);

	/*
	 * Write the data.  If we don't get through the loop at
	 * least once, the arguments were invalid.
	 */
	retval = EINVAL;
	end_addr = start_addr + count;
	for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) {
		/* Latch data, select the address, and start the write. */
		ahd_outw(ahd, SEEDAT, *tbuf++);
		ahd_outb(ahd, SEEADR, cur_addr);
		ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);

		retval = ahd_wait_seeprom(ahd);
		if (retval)
			break;
	}

	/*
	 * Disable writes.
	 */
	ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR);
	ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART);
	error = ahd_wait_seeprom(ahd);
	if (error)
		return (error);
	return (retval);
}
9019
9020/*
9021 * Wait ~100us for the serial eeprom to satisfy our request.
9022 */
9023int
9024ahd_wait_seeprom(struct ahd_softc *ahd)
9025{
9026	int cnt;
9027
9028	cnt = 2000;
9029	while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt)
9030		ahd_delay(5);
9031
9032	if (cnt == 0)
9033		return (ETIMEDOUT);
9034	return (0);
9035}
9036
9037/*
9038 * Validate the two checksums in the per_channel
9039 * vital product data struct.
9040 */
9041int
9042ahd_verify_vpd_cksum(struct vpd_config *vpd)
9043{
9044	int i;
9045	int maxaddr;
9046	uint32_t checksum;
9047	uint8_t *vpdarray;
9048
9049	vpdarray = (uint8_t *)vpd;
9050	maxaddr = offsetof(struct vpd_config, vpd_checksum);
9051	checksum = 0;
9052	for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++)
9053		checksum = checksum + vpdarray[i];
9054	if (checksum == 0
9055	 || (-checksum & 0xFF) != vpd->vpd_checksum)
9056		return (0);
9057
9058	checksum = 0;
9059	maxaddr = offsetof(struct vpd_config, checksum);
9060	for (i = offsetof(struct vpd_config, default_target_flags);
9061	     i < maxaddr; i++)
9062		checksum = checksum + vpdarray[i];
9063	if (checksum == 0
9064	 || (-checksum & 0xFF) != vpd->checksum)
9065		return (0);
9066	return (1);
9067}
9068
9069int
9070ahd_verify_cksum(struct seeprom_config *sc)
9071{
9072	int i;
9073	int maxaddr;
9074	uint32_t checksum;
9075	uint16_t *scarray;
9076
9077	maxaddr = (sizeof(*sc)/2) - 1;
9078	checksum = 0;
9079	scarray = (uint16_t *)sc;
9080
9081	for (i = 0; i < maxaddr; i++)
9082		checksum = checksum + scarray[i];
9083	if (checksum == 0
9084	 || (checksum & 0xFFFF) != sc->checksum) {
9085		return (0);
9086	} else {
9087		return (1);
9088	}
9089}
9090
/*
 * Claim the SEEPROM for host access.  Always succeeds (returns 1);
 * see the comment below for why no real detection is performed.
 * The disabled code shows the intended flexport-based probe.
 */
int
ahd_acquire_seeprom(struct ahd_softc *ahd)
{
	/*
	 * We should be able to determine the SEEPROM type
	 * from the flexport logic, but unfortunately not
	 * all implementations have this logic and there is
	 * no programatic method for determining if the logic
	 * is present.
	 */

	return (1);
#if 0
	uint8_t	seetype;
	int	error;

	error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
	if (error != 0
	 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
		return (0);
	return (1);
#endif
}
9114
/*
 * Counterpart to ahd_acquire_seeprom().  No release handshake is
 * required by the hardware, so this does nothing.
 */
void
ahd_release_seeprom(struct ahd_softc *ahd)
{
	/* Currently a no-op */
}
9120
/*
 * Write value to flexport register addr (0-7) via the board control
 * register's strobe protocol.  Panics on an out-of-range address.
 * Returns 0 on success or ETIMEDOUT if flexport arbitration fails.
 */
int
ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value)
{
	int error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_write_flexport: address out of range");
	/* Select the flexport address and wait for arbitration. */
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	/*
	 * Present the data, then pulse BRDSTB high and low to latch it.
	 * Each write is flushed so the device sees them in order.
	 */
	ahd_outb(ahd, BRDDAT, value);
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3));
	ahd_flush_device_writes(ahd);
	/* Release the board control bus. */
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}
9143
/*
 * Read the flexport register at addr (0-7) into *value.  Panics on an
 * out-of-range address.  Returns 0 on success or ETIMEDOUT if
 * flexport arbitration fails (in which case *value is untouched).
 */
int
ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value)
{
	int	error;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (addr > 7)
		panic("ahd_read_flexport: address out of range");
	/* Select the address in read mode and wait for arbitration. */
	ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3));
	error = ahd_wait_flexport(ahd);
	if (error != 0)
		return (error);
	*value = ahd_inb(ahd, BRDDAT);
	/* Release the board control bus. */
	ahd_outb(ahd, BRDCTL, 0);
	ahd_flush_device_writes(ahd);
	return (0);
}
9161
9162/*
9163 * Wait at most 2 seconds for flexport arbitration to succeed.
9164 */
9165int
9166ahd_wait_flexport(struct ahd_softc *ahd)
9167{
9168	int cnt;
9169
9170	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
9171	cnt = 1000000 * 2 / 5;
9172	while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt)
9173		ahd_delay(5);
9174
9175	if (cnt == 0)
9176		return (ETIMEDOUT);
9177	return (0);
9178}
9179
9180/************************* Target Mode ****************************************/
9181#ifdef AHD_TARGET_MODE
9182cam_status
9183ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb,
9184		    struct ahd_tmode_tstate **tstate,
9185		    struct ahd_tmode_lstate **lstate,
9186		    int notfound_failure)
9187{
9188
9189	if ((ahd->features & AHD_TARGETMODE) == 0)
9190		return (CAM_REQ_INVALID);
9191
9192	/*
9193	 * Handle the 'black hole' device that sucks up
9194	 * requests to unattached luns on enabled targets.
9195	 */
9196	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
9197	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
9198		*tstate = NULL;
9199		*lstate = ahd->black_hole;
9200	} else {
9201		u_int max_id;
9202
9203		max_id = (ahd->features & AHD_WIDE) ? 15 : 7;
9204		if (ccb->ccb_h.target_id > max_id)
9205			return (CAM_TID_INVALID);
9206
9207		if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS)
9208			return (CAM_LUN_INVALID);
9209
9210		*tstate = ahd->enabled_targets[ccb->ccb_h.target_id];
9211		*lstate = NULL;
9212		if (*tstate != NULL)
9213			*lstate =
9214			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
9215	}
9216
9217	if (notfound_failure != 0 && *lstate == NULL)
9218		return (CAM_PATH_INVALID);
9219
9220	return (CAM_REQ_CMP);
9221}
9222
/*
 * Process an XPT_EN_LUN CCB: enable or disable target-mode servicing
 * of a lun (or the wildcard 'black hole' device).  The entire body is
 * currently compiled out via NOT_YET; completion status is reported
 * in ccb->ccb_h.status.
 */
void
ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
#if NOT_YET
	struct	   ahd_tmode_tstate *tstate;
	struct	   ahd_tmode_lstate *lstate;
	struct	   ccb_en_lun *cel;
	cam_status status;
	u_int	   target;
	u_int	   lun;
	u_int	   target_mask;
	u_long	   s;
	char	   channel;

	status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate,
				     /*notfound_failure*/FALSE);

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * On multirole controllers, restrict which target ids may be
	 * enabled while the initiator role is active.
	 */
	if ((ahd->features & AHD_MULTIROLE) != 0) {
		u_int	   our_id;

		our_id = ahd->our_id;
		if (ccb->ccb_h.target_id != our_id) {
			if ((ahd->features & AHD_MULTI_TID) != 0
			 && (ahd->flags & AHD_INITIATORROLE) != 0) {
				/*
				 * Only allow additional targets if
				 * the initiator role is disabled.
				 * The hardware cannot handle a re-select-in
				 * on the initiator id during a re-select-out
				 * on a different target id.
				 */
				status = CAM_TID_INVALID;
			} else if ((ahd->flags & AHD_INITIATORROLE) != 0
				|| ahd->enabled_luns > 0) {
				/*
				 * Only allow our target id to change
				 * if the initiator role is not configured
				 * and there are no enabled luns which
				 * are attached to the currently registered
				 * scsi id.
				 */
				status = CAM_TID_INVALID;
			}
		}
	}

	if (status != CAM_REQ_CMP) {
		ccb->ccb_h.status = status;
		return;
	}

	/*
	 * We now have an id that is valid.
	 * If we aren't in target mode, switch modes.
	 */
	if ((ahd->flags & AHD_TARGETROLE) == 0
	 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
		u_long	s;

		printf("Configuring Target Mode\n");
		ahd_lock(ahd, &s);
		/* Refuse the role switch while commands are in flight. */
		if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
			ccb->ccb_h.status = CAM_BUSY;
			ahd_unlock(ahd, &s);
			return;
		}
		ahd->flags |= AHD_TARGETROLE;
		if ((ahd->features & AHD_MULTIROLE) == 0)
			ahd->flags &= ~AHD_INITIATORROLE;
		/* Reload sequencer firmware for the new role mix. */
		ahd_pause(ahd);
		ahd_loadseq(ahd);
		ahd_restart(ahd);
		ahd_unlock(ahd, &s);
	}
	cel = &ccb->cel;
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	channel = SIM_CHANNEL(ahd, sim);
	target_mask = 0x01 << target;
	if (channel == 'B')
		target_mask <<= 8;

	if (cel->enable != 0) {
		u_int scsiseq1;

		/* Are we already enabled?? */
		if (lstate != NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Lun already enabled\n");
			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
			return;
		}

		if (cel->grp6_len != 0
		 || cel->grp7_len != 0) {
			/*
			 * Don't (yet?) support vendor
			 * specific commands.
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahd_alloc_tstate(ahd, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		ahd_lock(ahd, &s);
		ahd_pause(ahd);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahd->enabled_luns++;

			if ((ahd->features & AHD_MULTI_TID) != 0) {
				u_int targid_mask;

				/* Add this target to the selection mask. */
				targid_mask = ahd_inb(ahd, TARGID)
					    | (ahd_inb(ahd, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahd_outb(ahd, TARGID, targid_mask);
				ahd_outb(ahd, TARGID+1, (targid_mask >> 8));

				ahd_update_scsiid(ahd, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahd, sim);
				our_id = SIM_SCSI_ID(ahd, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					sblkctl = ahd_inb(ahd, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahd->features & AHD_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					ahd->our_id = target;

					if (swap)
						ahd_outb(ahd, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahd_outb(ahd, SCSIID, target);

					if (swap)
						ahd_outb(ahd, SBLKCTL, sblkctl);
				}
			}
		} else
			ahd->black_hole = lstate;
		/* Allow select-in operations */
		if (ahd->black_hole != NULL && ahd->enabled_luns > 0) {
			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 |= ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahd_lock(ahd, &s);

		/*
		 * Refuse the disable while any CTIO, ATIO, or INOT is
		 * still outstanding for this path.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahd_unlock(ahd, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahd_unlock(ahd, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		ahd_pause(ahd);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahd->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahd_free_tstate(ahd, target, channel,
						/*force*/FALSE);
				if (ahd->features & AHD_MULTI_TID) {
					u_int targid_mask;

					/* Drop this target from the
					 * selection mask. */
					targid_mask = ahd_inb(ahd, TARGID)
						    | (ahd_inb(ahd, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahd_outb(ahd, TARGID, targid_mask);
					ahd_outb(ahd, TARGID+1,
						 (targid_mask >> 8));
					ahd_update_scsiid(ahd, targid_mask);
				}
			}
		} else {

			ahd->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahd->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq1;

			scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1);
			scsiseq1 = ahd_inb(ahd, SCSISEQ1);
			scsiseq1 &= ~ENSELI;
			ahd_outb(ahd, SCSISEQ1, scsiseq1);

			if ((ahd->features & AHD_MULTIROLE) == 0) {
				printf("Configuring Initiator Mode\n");
				ahd->flags &= ~AHD_TARGETROLE;
				ahd->flags |= AHD_INITIATORROLE;
				ahd_pause(ahd);
				ahd_loadseq(ahd);
				ahd_restart(ahd);
				/*
				 * Unpaused.  The extra unpause
				 * that follows is harmless.
				 */
			}
		}
		ahd_unpause(ahd);
		ahd_unlock(ahd, &s);
	}
#endif
}
9545
/*
 * Keep the SCSIID "our id" field consistent with the TARGID selection
 * mask on multi-TID controllers.  Currently compiled out (NOT_YET);
 * NOTE(review): the disabled body references ULTRA2-era registers,
 * which suggests it was carried over from the aic7xxx driver.
 */
static void
ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
{
#if NOT_YET
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahd->features & AHD_MULTI_TID) == 0)
		panic("ahd_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahd->features & AHD_ULTRA2) != 0)
		scsiid = ahd_inb(ahd, SCSIID_ULTRA2);
	else
		scsiid = ahd_inb(ahd, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/* ffs counts from 1 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahd->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahd->features & AHD_ULTRA2) != 0)
		ahd_outb(ahd, SCSIID_ULTRA2, scsiid);
	else
		ahd_outb(ahd, SCSIID, scsiid);
#endif
}
9585
9586#ifdef AHD_TARGET_MODE
/*
 * Drain the target-mode incoming-command FIFO, handing each valid
 * entry to ahd_handle_target_cmd().  Stops early if a command cannot
 * be processed (no ATIO resources); that entry stays valid and will
 * be retried on the next call.  The 'paused' argument is currently
 * unused here.
 */
void
ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
{
	struct target_cmd *cmd;

	/* Make the DMA'd command entries visible to the CPU. */
	ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD);
	while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahd_handle_target_cmd(ahd, cmd) != 0)
			break;

		/* Return the slot to the controller before advancing. */
		cmd->cmd_valid = 0;
		ahd_dmamap_sync(ahd, ahd->parent_dmat /*shared_data_dmat*/,
				ahd->shared_data_map.dmamap,
				ahd_targetcmd_offset(ahd, ahd->tqinfifonext),
				sizeof(struct target_cmd),
				BUS_DMASYNC_PREREAD);
		ahd->tqinfifonext++;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			u_int hs_mailbox;

			hs_mailbox = ahd_inb(ahd, HS_MAILBOX);
			hs_mailbox &= ~HOST_TQINPOS;
			hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS;
			ahd_outb(ahd, HS_MAILBOX, hs_mailbox);
		}
	}
}
9624#endif
9625
/*
 * Translate one incoming target-mode command into an ATIO for the
 * peripheral driver that has the addressed lun enabled (or the black
 * hole device for disabled luns).  Returns 0 when the command was
 * consumed, or 1 when no ATIO was available (caller must retry; the
 * TQINFIFO is marked blocked until resources appear).
 */
static int
ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
{
	struct	  ahd_tmode_tstate *tstate;
	struct	  ahd_tmode_lstate *lstate;
	struct	  ccb_accept_tio *atio;
	uint8_t *byte;
	int	  initiator;
	int	  target;
	int	  lun;

	initiator = SCSIID_TARGET(ahd, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahd->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahd->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahd->flags |= AHD_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		return (1);
	} else
		ahd->flags &= ~AHD_TQINFIFO_BLOCKED;
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_TQIN) != 0)
	  printf("%s: incoming command from %d for %d:%d%s\n",
		 ahd_name(ahd),
		 initiator, target, lun,
		 lstate == ahd->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahd->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/* A leading 0xFF marker means no tag message was received. */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_TQIN) != 0)
			printf("Received Immediate Command %d:%d:%d - %p\n",
			       initiator, target, lun, ahd->pending_device);
#endif
		ahd->pending_device = lstate;
		ahd_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}
9739
9740#endif
9741
9742static int
9743ahd_createdmamem(bus_dma_tag_t tag, int size, int flags, bus_dmamap_t *mapp,
9744    void **vaddr, bus_addr_t *baddr, bus_dma_segment_t *seg, int *nseg,
9745    const char *myname, const char *what)
9746{
9747	int error, level = 0;
9748
9749	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
9750				      seg, 1, nseg, BUS_DMA_WAITOK)) != 0) {
9751		printf("%s: failed to allocate DMA mem for %s, error = %d\n",
9752			myname, what, error);
9753		goto out;
9754	}
9755	level++;
9756
9757	if ((error = bus_dmamem_map(tag, seg, *nseg, size, vaddr,
9758				    BUS_DMA_WAITOK|BUS_DMA_COHERENT)) != 0) {
9759		printf("%s: failed to map DMA mem for %s, error = %d\n",
9760			myname, what, error);
9761		goto out;
9762	}
9763	level++;
9764
9765	if ((error = bus_dmamap_create(tag, size, 1, size, 0,
9766				       BUS_DMA_WAITOK | flags, mapp)) != 0) {
9767		printf("%s: failed to create DMA map for %s, error = %d\n",
9768			myname, what, error);
9769		goto out;
9770	}
9771	level++;
9772
9773
9774	if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
9775				     BUS_DMA_WAITOK)) != 0) {
9776		printf("%s: failed to load DMA map for %s, error = %d\n",
9777			myname, what, error);
9778		goto out;
9779	}
9780
9781	*baddr = (*mapp)->dm_segs[0].ds_addr;
9782
9783	return 0;
9784out:
9785	printf("ahd_createdmamem error (%d)\n", level);
9786	switch (level) {
9787	case 3:
9788		bus_dmamap_destroy(tag, *mapp);
9789		/* FALLTHROUGH */
9790	case 2:
9791		bus_dmamem_unmap(tag, *vaddr, size);
9792		/* FALLTHROUGH */
9793	case 1:
9794		bus_dmamem_free(tag, seg, *nseg);
9795		break;
9796	default:
9797		break;
9798	}
9799
9800	return error;
9801}
9802
/*
 * Release DMA memory created by ahd_createdmamem(), tearing down the
 * resources in the reverse order of their acquisition: unload and
 * destroy the map, then unmap and free the backing memory.
 */
static void
ahd_freedmamem(bus_dma_tag_t tag, int size, bus_dmamap_t map, void *vaddr,
    bus_dma_segment_t *seg, int nseg)
{

	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_unmap(tag, vaddr, size);
	bus_dmamem_free(tag, seg, nseg);
}
9813