1/*
2 * Generic driver for the aic7xxx based adaptec SCSI controllers
3 * Product specific probe and attach routines can be found in:
4 * i386/eisa/ahc_eisa.c	27/284X and aic7770 motherboard controllers
5 * pci/ahc_pci.c	3985, 3980, 3940, 2940, aic7895, aic7890,
6 *			aic7880, aic7870, aic7860, and aic7850 controllers
7 *
8 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions, and the following disclaimer,
16 *    without modification.
17 * 2. The name of the author may not be used to endorse or promote products
18 *    derived from this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU Public License ("GPL").
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 65176 2000-08-28 21:48:13Z dfr $
36 */
37/*
38 * A few notes on features of the driver.
39 *
40 * SCB paging takes advantage of the fact that devices stay disconnected
41 * from the bus a relatively long time and that while they're disconnected,
42 * having the SCBs for these transactions down on the host adapter is of
43 * little use.  Instead of leaving this idle SCB down on the card we copy
44 * it back up into kernel memory and reuse the SCB slot on the card to
45 * schedule another transaction.  This can be a real payoff when doing random
46 * I/O to tagged queueing devices since there are more transactions active at
47 * once for the device to sort for optimal seek reduction. The algorithm goes
48 * like this...
49 *
50 * The sequencer maintains two lists of its hardware SCBs.  The first is the
51 * singly linked free list which tracks all SCBs that are not currently in
52 * use.  The second is the doubly linked disconnected list which holds the
53 * SCBs of transactions that are in the disconnected state sorted most
54 * recently disconnected first.  When the kernel queues a transaction to
55 * the card, a hardware SCB to "house" this transaction is retrieved from
56 * either of these two lists.  If the SCB came from the disconnected list,
57 * a check is made to see if any data transfer or SCB linking (more on linking
58 * in a bit) information has been changed since it was copied from the host
 59 * and, if so, the SCB is DMAed back up before the slot is reused.  Once a hardware
60 * SCB has been obtained, the SCB is DMAed from the host.  Before any work
61 * can begin on this SCB, the sequencer must ensure that either the SCB is
62 * for a tagged transaction or the target is not already working on another
63 * non-tagged transaction.  If a conflict arises in the non-tagged case, the
 64 * sequencer finds the SCB for the active transaction and sets the SCB_LINKED
65 * field in that SCB to this next SCB to execute.  To facilitate finding
66 * active non-tagged SCBs, the last four bytes of up to the first four hardware
67 * SCBs serve as a storage area for the currently active SCB ID for each
68 * target.
69 *
70 * When a device reconnects, a search is made of the hardware SCBs to find
71 * the SCB for this transaction.  If the search fails, a hardware SCB is
72 * pulled from either the free or disconnected SCB list and the proper
73 * SCB is DMAed from the host.  If the MK_MESSAGE control bit is set
74 * in the control byte of the SCB while it was disconnected, the sequencer
 75 * will assert ATN and attempt to issue a message to the target.
76 *
77 * When a command completes, a check for non-zero status and residuals is
78 * made.  If either of these conditions exists, the SCB is DMAed back up to
79 * the host so that it can interpret this information.  Additionally, in the
80 * case of bad status, the sequencer generates a special interrupt and pauses
 81 * itself.  This allows the host to set up a request sense command if it
82 * chooses for this target synchronously with the error so that sense
83 * information isn't lost.
84 *
85 */
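
/*
 * In rough outline (a sketch of the bookkeeping described above, not
 * actual sequencer code):
 *
 *	to start a new transaction:
 *		take a hardware SCB from the free list, or failing that,
 *		    the most recently disconnected SCB;
 *		if that disconnected SCB was modified on the card,
 *		    DMA it back up to the host first;
 *		DMA the new SCB down from the host;
 *		if the transaction is untagged and its target is busy,
 *		    link it to the active SCB via SCB_LINKED.
 */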
86
 87#include "opt_aic7xxx.h"
88
 89#include "pci.h"
90#include <stddef.h>	/* For offsetof */
91
92#include <sys/param.h>
93#include <sys/systm.h>
94#include <sys/malloc.h>
95#include <sys/eventhandler.h>
96#include <sys/proc.h>
97
98#include <cam/cam.h>
99#include <cam/cam_ccb.h>
100#include <cam/cam_sim.h>
101#include <cam/cam_xpt_sim.h>
102#include <cam/cam_debug.h>
103
104#include <cam/scsi/scsi_all.h>
105#include <cam/scsi/scsi_message.h>
106
107#if NPCI > 0
108#include <machine/bus_memio.h>
109#endif
110#include <machine/bus_pio.h>
111#include <machine/bus.h>
112#include <machine/clock.h>
113#include <machine/endian.h>
114#include <sys/rman.h>
115
116#include <vm/vm.h>
117#include <vm/vm_param.h>
118#include <vm/pmap.h>
119
120#include <dev/aic7xxx/aic7xxx.h>
121#include <dev/aic7xxx/aicasm_insformat.h>
122
123#include <aic7xxx_reg.h>
124#include <aic7xxx_seq.h>
125
126#include <sys/kernel.h>
127
128#ifndef AHC_TMODE_ENABLE
129#define AHC_TMODE_ENABLE 0
130#endif
131
132#define MAX(a,b) (((a) > (b)) ? (a) : (b))
133#define MIN(a,b) (((a) < (b)) ? (a) : (b))
134#define ALL_CHANNELS '\0'
135#define ALL_TARGETS_MASK 0xFFFF
136#define INITIATOR_WILDCARD	(~0)
137
138#define	SIM_IS_SCSIBUS_B(ahc, sim)	\
139	((sim) == ahc->sim_b)
140#define	SIM_CHANNEL(ahc, sim)	\
141	(((sim) == ahc->sim_b) ? 'B' : 'A')
142#define	SIM_SCSI_ID(ahc, sim)	\
143	(((sim) == ahc->sim_b) ? ahc->our_id_b : ahc->our_id)
144#define	SIM_PATH(ahc, sim)	\
145	(((sim) == ahc->sim_b) ? ahc->path_b : ahc->path)
146#define SCSIID_TARGET(ahc, scsiid) \
147	(((scsiid) & ((((ahc)->features & AHC_TWIN) != 0) ? TWIN_TID : TID)) \
148	>> TID_SHIFT)
149#define SCSIID_OUR_ID(scsiid) \
150	((scsiid) & OID)
151#define SCSIID_CHANNEL(ahc, scsiid) \
152	((((ahc)->features & AHC_TWIN) != 0) \
153        ? ((((scsiid) & TWIN_CHNLB) != 0) ? 'B' : 'A') \
154       : 'A')
155#define	SCB_IS_SCSIBUS_B(ahc, scb) \
156	(SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid) == 'B')
157#define	SCB_GET_OUR_ID(scb) \
158	SCSIID_OUR_ID((scb)->hscb->scsiid)
159#define	SCB_GET_TARGET(ahc, scb) \
160	SCSIID_TARGET((ahc), (scb)->hscb->scsiid)
161#define	SCB_GET_CHANNEL(ahc, scb) \
162	SCSIID_CHANNEL(ahc, (scb)->hscb->scsiid)
163#define	SCB_GET_LUN(scb) \
164	((scb)->hscb->lun)
165#define SCB_GET_TARGET_OFFSET(ahc, scb)	\
166	(SCB_GET_TARGET(ahc, scb) + (SCB_IS_SCSIBUS_B(ahc, scb) ? 8 : 0))
167#define SCB_GET_TARGET_MASK(ahc, scb) \
168	(0x01 << (SCB_GET_TARGET_OFFSET(ahc, scb)))
169#define TCL_TARGET_OFFSET(tcl) \
170	((((tcl) >> 4) & TID) >> 4)
171#define TCL_LUN(tcl) \
172	(tcl & (AHC_NUM_LUNS - 1))
173#define BUILD_TCL(scsiid, lun) \
174	((lun) | (((scsiid) & TID) << 4))
175#define BUILD_SCSIID(ahc, sim, target_id, our_id) \
176	((((target_id) << TID_SHIFT) & TID) | (our_id) \
177	| (SIM_IS_SCSIBUS_B(ahc, sim) ? TWIN_CHNLB : 0))
178
179#define ccb_scb_ptr spriv_ptr0
180#define ccb_ahc_ptr spriv_ptr1
181
182char *ahc_chip_names[] =
183{
184	"NONE",
185	"aic7770",
186	"aic7850",
187	"aic7855",
188	"aic7859",
189	"aic7860",
190	"aic7870",
191	"aic7880",
192	"aic7895",
193	"aic7890/91",
194	"aic7896/97",
195	"aic7892",
196	"aic7899"
197};
198
199typedef enum {
200	ROLE_UNKNOWN,
201	ROLE_INITIATOR,
202	ROLE_TARGET
203} role_t;
204
205struct ahc_devinfo {
206	int	  our_scsiid;
207	int	  target_offset;
208	uint16_t target_mask;
209	uint8_t  target;
210	uint8_t  lun;
211	char	  channel;
212	role_t	  role;		/*
213				 * Only guaranteed to be correct if not
214				 * in the busfree state.
215				 */
216};
217
218typedef enum {
219	SEARCH_COMPLETE,
220	SEARCH_COUNT,
221	SEARCH_REMOVE
222} ahc_search_action;
223
224#ifdef AHC_DEBUG
225static int     ahc_debug = AHC_DEBUG;
226#endif
227
228#if NPCI > 0
229void ahc_pci_intr(struct ahc_softc *ahc);
230#endif
231
232static int	ahcinitscbdata(struct ahc_softc *ahc);
233static void	ahcfiniscbdata(struct ahc_softc *ahc);
234
235static bus_dmamap_callback_t	ahcdmamapcb;
236
237#if UNUSED
238static void	ahc_dump_targcmd(struct target_cmd *cmd);
239#endif
240static void	ahc_shutdown(void *arg, int howto);
241static cam_status
242		ahc_find_tmode_devs(struct ahc_softc *ahc,
243				    struct cam_sim *sim, union ccb *ccb,
244				    struct tmode_tstate **tstate,
245				    struct tmode_lstate **lstate,
246				    int notfound_failure);
247static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
248static void	ahc_async(void *callback_arg, uint32_t code,
249			  struct cam_path *path, void *arg);
250static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
251				int nsegments, int error);
252static void	ahc_poll(struct cam_sim *sim);
253static void	ahc_setup_data(struct ahc_softc *ahc,
254			       struct ccb_scsiio *csio, struct scb *scb);
255static void	ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path);
256static void	ahcallocscbs(struct ahc_softc *ahc);
257#if UNUSED
258static void	ahc_scb_devinfo(struct ahc_softc *ahc,
259				struct ahc_devinfo *devinfo,
260				struct scb *scb);
261#endif
262static void	ahc_fetch_devinfo(struct ahc_softc *ahc,
263				  struct ahc_devinfo *devinfo);
264static void	ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id,
265				    u_int target, u_int lun, char channel,
266				    role_t role);
267static u_int	ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev);
268static void	ahc_done(struct ahc_softc *ahc, struct scb *scbp);
269static struct tmode_tstate *
270		ahc_alloc_tstate(struct ahc_softc *ahc,
271				 u_int scsi_id, char channel);
272static void	ahc_free_tstate(struct ahc_softc *ahc,
273				u_int scsi_id, char channel, int force);
274static void	ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim,
275				  union ccb *ccb);
276static void	ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask);
277static int	ahc_handle_target_cmd(struct ahc_softc *ahc,
278				      struct target_cmd *cmd);
279static void 	ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
280static void	ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat);
281static void	ahc_build_transfer_msg(struct ahc_softc *ahc,
282				       struct ahc_devinfo *devinfo);
283static void	ahc_setup_initiator_msgout(struct ahc_softc *ahc,
284					   struct ahc_devinfo *devinfo,
285					   struct scb *scb);
286static void	ahc_setup_target_msgin(struct ahc_softc *ahc,
287				       struct ahc_devinfo *devinfo);
288static int	ahc_handle_msg_reject(struct ahc_softc *ahc,
289				      struct ahc_devinfo *devinfo);
290static void	ahc_clear_msg_state(struct ahc_softc *ahc);
291static void	ahc_handle_message_phase(struct ahc_softc *ahc,
292					 struct cam_path *path);
293static int	ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full);
294typedef enum {
295	MSGLOOP_IN_PROG,
296	MSGLOOP_MSGCOMPLETE,
297	MSGLOOP_TERMINATED
298} msg_loop_stat;
299static int ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
300				   struct ahc_devinfo *devinfo);
301static void	ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
302					    struct ahc_devinfo *devinfo);
303static void	ahc_handle_devreset(struct ahc_softc *ahc,
304				    struct ahc_devinfo *devinfo,
305				    cam_status status, ac_code acode,
306				    char *message,
307				    int verbose_level);
308#ifdef AHC_DUMP_SEQ
309static void	ahc_dumpseq(struct ahc_softc *ahc);
310#endif
311static void	ahc_loadseq(struct ahc_softc *ahc);
312static int	ahc_check_patch(struct ahc_softc *ahc,
313				struct patch **start_patch,
314				u_int start_instr, u_int *skip_addr);
315static void	ahc_download_instr(struct ahc_softc *ahc,
316				   u_int instrptr, uint8_t *dconsts);
317static int	ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
318			      int target, char channel, int lun, u_int tag,
319			      role_t role);
320#ifdef AHC_DEBUG
321static void	ahc_print_scb(struct scb *scb);
322#endif
323static int	ahc_search_qinfifo(struct ahc_softc *ahc, int target,
324				   char channel, int lun, u_int tag,
325				   role_t role, uint32_t status,
326				   ahc_search_action action);
327static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
328			      union ccb *ccb);
329static int	ahc_reset_channel(struct ahc_softc *ahc, char channel,
330				  int initiate_reset);
331static int	ahc_abort_scbs(struct ahc_softc *ahc, int target,
332			       char channel, int lun, u_int tag, role_t role,
333			       uint32_t status);
334static int	ahc_search_disc_list(struct ahc_softc *ahc, int target,
335				     char channel, int lun, u_int tag,
336				     int stop_on_first, int remove,
337				     int save_state);
338static u_int	ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
339					   u_int prev, u_int scbptr);
340static void	ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
341static void	ahc_clear_intstat(struct ahc_softc *ahc);
342static void	ahc_reset_current_bus(struct ahc_softc *ahc);
343static struct ahc_syncrate *
344		ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period,
345					u_int *ppr_options);
346static struct ahc_syncrate *
347		ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
348				  u_int *ppr_options, u_int maxsync);
349static u_int	ahc_find_period(struct ahc_softc *ahc, u_int scsirate,
350				u_int maxsync);
351static void	ahc_validate_offset(struct ahc_softc *ahc,
352				    struct ahc_syncrate *syncrate,
353				    u_int *offset, int wide);
354static void	ahc_validate_width(struct ahc_softc *ahc, u_int *bus_width);
355static void	ahc_update_target_msg_request(struct ahc_softc *ahc,
356					      struct ahc_devinfo *devinfo,
357					      struct ahc_initiator_tinfo *tinfo,
358					      int force, int paused);
359static int	ahc_create_path(struct ahc_softc *ahc,
360				struct ahc_devinfo *devinfo,
361				struct cam_path **path);
362static void	ahc_set_syncrate(struct ahc_softc *ahc,
363				 struct ahc_devinfo *devinfo,
364				 struct cam_path *path,
365				 struct ahc_syncrate *syncrate,
366				 u_int period, u_int offset,
367				 u_int ppr_options, u_int type,
368				 int paused);
369static void	ahc_set_width(struct ahc_softc *ahc,
370			      struct ahc_devinfo *devinfo,
371			      struct cam_path *path, u_int width, u_int type,
372			      int paused);
373static void	ahc_set_tags(struct ahc_softc *ahc,
374			     struct ahc_devinfo *devinfo,
375			     int enable);
376static void	ahc_construct_sdtr(struct ahc_softc *ahc,
377				   u_int period, u_int offset);
378
379static void	ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width);
380static void	ahc_construct_ppr(struct ahc_softc *ahc, u_int period,
381				  u_int offset, u_int bus_width,
382				  u_int ppr_options);
383
384static __inline int ahc_check_residual(struct scb *scb);
385static void	ahc_calc_residual(struct scb *scb);
386
387static void	ahc_update_pending_syncrates(struct ahc_softc *ahc);
388
389static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
390
391static timeout_t
392		ahc_timeout;
393static void	ahc_queue_lstate_event(struct ahc_softc *ahc,
394				       struct tmode_lstate *lstate,
395				       u_int initiator_id, u_int event_type,
396				       u_int event_arg);
397static void	ahc_send_lstate_events(struct ahc_softc *ahc,
398				       struct tmode_lstate *lstate);
399static void	restart_sequencer(struct ahc_softc *ahc);
400static u_int	ahc_index_busy_tcl(struct ahc_softc *ahc,
401					 u_int tcl, int unbusy);
402
403static __inline void	   ahc_freeze_ccb(union ccb* ccb);
404static __inline cam_status ahc_ccb_status(union ccb* ccb);
405static __inline void	   ahcsetccbstatus(union ccb* ccb,
406					   cam_status status);
407static void		   ahc_run_untagged_queues(struct ahc_softc *);
408static void		   ahc_run_untagged_queue(struct ahc_softc *,
409						  struct scb_tailq *);
410static void		   ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
411static void		   ahc_run_qoutfifo(struct ahc_softc *ahc);
412
413static __inline struct ahc_initiator_tinfo *
414			   ahc_fetch_transinfo(struct ahc_softc *ahc,
415					       char channel,
416					       u_int our_id, u_int target,
417					       struct tmode_tstate **tstate);
418static __inline struct ahc_dma_seg *
419			    ahc_sg_bus_to_virt(struct scb *scb,
420					       uint32_t sg_busaddr);
421static __inline uint32_t
422			    ahc_sg_virt_to_bus(struct scb *scb,
423					       struct ahc_dma_seg *sg);
424static __inline void	    ahc_queue_scb(struct ahc_softc *ahc,
425					  struct scb *scb);
426static void	   ahcfreescb(struct ahc_softc *ahc, struct scb *scb);
427static __inline	struct scb *ahcgetscb(struct ahc_softc *ahc);
428static __inline void	    ahc_freeze_untagged_queues(struct ahc_softc *ahc);
429static __inline void	    ahc_release_untagged_queues(struct ahc_softc *ahc);
430
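/*
 * Return the bus address of the hardware SCB at "index" within the
 * controller's contiguously allocated hscb array.
 */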
431static __inline uint32_t
432ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
433{
434	return (ahc->scb_data->hscb_busaddr
435		+ (sizeof(struct hardware_scb) * index));
436}
437
438#define AHC_BUSRESET_DELAY	250	/* Reset delay in us */
439
440/*
441 * Restart the sequencer program from address zero
442 */
443static void
444restart_sequencer(struct ahc_softc *ahc)
445{
446	u_int i;
447
448	pause_sequencer(ahc);
449
450	/*
 451	 * Every time we restart the sequencer, there
 452	 * is the possibility that we have restarted
 453	 * within a three instruction window where an
 454	 * SCB has been marked free but has not made it
 455	 * onto the free list.  Since SCSI events (bus reset,
 456	 * unexpected bus free) will always freeze the
 457	 * sequencer, we cannot close this window.  To
 458	 * avoid losing an SCB, we reconstitute the free
459	 * list every time we restart the sequencer.
460	 */
461	ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
462	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
463
464		ahc_outb(ahc, SCBPTR, i);
465		if (ahc_inb(ahc, SCB_TAG) == SCB_LIST_NULL) {
466			ahc_add_curscb_to_free_list(ahc);
467		}
468	}
469	ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET);
470	unpause_sequencer(ahc);
471}
472
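/*
 * Return the SCB ID of the untagged transaction currently active for
 * the target/channel/lun encoded in "tcl", or SCB_LIST_NULL if that
 * device is idle.  If "unbusy" is non-zero, the busy entry is cleared.
 * The busy table lives in SCB space on AHC_SCB_BTT controllers and in
 * the BUSY_TARGETS scratch ram area otherwise.
 */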
473static u_int
474ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
475{
476	u_int scbid;
477	u_int target_offset;
478
479	if ((ahc->features & AHC_SCB_BTT) != 0) {
480		u_int saved_scbptr;
481
482		saved_scbptr = ahc_inb(ahc, SCBPTR);
483		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
484		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
485		if (unbusy)
486			ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl),
487				 SCB_LIST_NULL);
488		ahc_outb(ahc, SCBPTR, saved_scbptr);
489	} else {
490		target_offset = TCL_TARGET_OFFSET(tcl);
491		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
492		if (unbusy)
493			ahc_outb(ahc, BUSY_TARGETS + target_offset,
494				 SCB_LIST_NULL);
495	}
496
497	return (scbid);
498}
499
500static __inline int
501ahc_check_residual(struct scb *scb)
502{
503	struct status_pkt *sp;
504
505	sp = &scb->hscb->shared_data.status;
506	if ((scb->hscb->sgptr & SG_RESID_VALID) != 0)
507		return (1);
508	return (0);
509}
510
511static __inline void
512ahc_freeze_ccb(union ccb* ccb)
513{
514	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
515		ccb->ccb_h.status |= CAM_DEV_QFRZN;
516		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
517	}
518}
519
520static __inline cam_status
521ahc_ccb_status(union ccb* ccb)
522{
523	return (ccb->ccb_h.status & CAM_STATUS_MASK);
524}
525
526static __inline void
527ahcsetccbstatus(union ccb* ccb, cam_status status)
528{
529	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
530	ccb->ccb_h.status |= status;
531}
532
533static __inline struct ahc_initiator_tinfo *
534ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
535		    u_int remote_id, struct tmode_tstate **tstate)
536{
537	/*
538	 * Transfer data structures are stored from the perspective
539	 * of the target role.  Since the parameters for a connection
540	 * in the initiator role to a given target are the same as
541	 * when the roles are reversed, we pretend we are the target.
542	 */
543	if (channel == 'B')
544		our_id += 8;
545	*tstate = ahc->enabled_targets[our_id];
546	return (&(*tstate)->transinfo[remote_id]);
547}
548
549static __inline struct ahc_dma_seg *
550ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
551{
552	int sg_index;
553
554	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
555	/* sg_list_phys points to entry 1, not 0 */
556	sg_index++;
557
558	return (&scb->sg_list[sg_index]);
559}
560
561static __inline uint32_t
562ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
563{
564	int sg_index;
565
566	/* sg_list_phys points to entry 1, not 0 */
567	sg_index = sg - &scb->sg_list[1];
568
569	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
570}
571
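/*
 * Post an SCB to the sequencer by placing its tag in the QINFIFO.
 * Controllers with queue registers are notified via HNSCB_QOFF; older
 * chips require pausing the sequencer to update KERNEL_QINPOS.
 */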
572static __inline void
573ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
574{
575	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
576	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
577		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
578	} else {
579		pause_sequencer(ahc);
580		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
581		unpause_sequencer(ahc);
582	}
583}
584
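/*
 * The untagged queue lock counts nested holders.  While it is non-zero,
 * no new untagged transactions are started.  Controllers with SCB-based
 * busy target tables (AHC_SCB_BTT) serialize untagged transactions in
 * the sequencer, so they bypass this lock entirely.
 */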
585static __inline void
586ahc_freeze_untagged_queues(struct ahc_softc *ahc)
587{
588	if ((ahc->features & AHC_SCB_BTT) == 0)
589		ahc->untagged_queue_lock++;
590}
591
592static __inline void
593ahc_release_untagged_queues(struct ahc_softc *ahc)
594{
595	if ((ahc->features & AHC_SCB_BTT) == 0) {
596		ahc->untagged_queue_lock--;
597		if (ahc->untagged_queue_lock == 0)
598			ahc_run_untagged_queues(ahc);
599	}
600}
601
602static void
603ahc_run_untagged_queues(struct ahc_softc *ahc)
604{
605	int i;
606
607	for (i = 0; i < 16; i++)
608		ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
609}
610
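/*
 * If the untagged queues are not frozen, start the next untagged
 * transaction queued for this target, provided none is already active.
 */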
611static void
612ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
613{
614	struct scb *scb;
615
616	if (ahc->untagged_queue_lock != 0)
617		return;
618
619	if ((scb = TAILQ_FIRST(queue)) != NULL
620	 && (scb->flags & SCB_ACTIVE) == 0) {
621		scb->flags |= SCB_ACTIVE;
622		ahc_queue_scb(ahc, scb);
623	}
624}
625
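/*
 * Consume incoming target mode commands from the sequencer's queue,
 * stopping early if we lack the resources to handle one, and lazily
 * report our consumption point back to the sequencer.
 */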
626static void
627ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
628{
629	struct target_cmd *cmd;
630
631	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
632
633		/*
634		 * Only advance through the queue if we
635		 * have the resources to process the command.
636		 */
637		if (ahc_handle_target_cmd(ahc, cmd) != 0)
638			break;
639
640		ahc->tqinfifonext++;
641		cmd->cmd_valid = 0;
642
643		/*
 644		 * Lazily update our position in the target mode incoming
645		 * command queue as seen by the sequencer.
646		 */
647		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
648			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
649				u_int hs_mailbox;
650
651				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
652				hs_mailbox &= ~HOST_TQINPOS;
653				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
654				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
655			} else {
656				if (!paused)
657					pause_sequencer(ahc);
658				ahc_outb(ahc, KERNEL_TQINPOS,
659					 ahc->tqinfifonext & HOST_TQINPOS);
660				if (!paused)
 661					unpause_sequencer(ahc);
662			}
663		}
664	}
665}
666
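/*
 * Process the SCB completion tags the sequencer has posted to the
 * QOUTFIFO, computing residuals and finishing each transaction.
 */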
667static void
668ahc_run_qoutfifo(struct ahc_softc *ahc)
669{
670	struct scb *scb;
671	u_int  scb_index;
672
673	while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
674		scb_index = ahc->qoutfifo[ahc->qoutfifonext];
675		ahc->qoutfifo[ahc->qoutfifonext++] = SCB_LIST_NULL;
676
677		scb = &ahc->scb_data->scbarray[scb_index];
678		if (scb_index >= ahc->scb_data->numscbs
679		  || (scb->flags & SCB_ACTIVE) == 0) {
680			printf("%s: WARNING no command for scb %d "
681			       "(cmdcmplt)\nQOUTPOS = %d\n",
682			       ahc_name(ahc), scb_index,
683			       ahc->qoutfifonext - 1);
684			continue;
685		}
686
687		/*
688		 * Save off the residual
689		 * if there is one.
690		 */
691		if (ahc_check_residual(scb) != 0)
692			ahc_calc_residual(scb);
693		else
694			scb->ccb->csio.resid = 0;
695		ahc_done(ahc, scb);
696	}
697}
698
699
700/*
701 * Return an SCB resource to the free list.
702 */
703static void
704ahcfreescb(struct ahc_softc *ahc, struct scb *scb)
705{
706	struct hardware_scb *hscb;
707	int opri;
708
709	hscb = scb->hscb;
710
711	opri = splcam();
712
713	if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
714	 && (scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
715		scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
716		ahc->flags &= ~AHC_RESOURCE_SHORTAGE;
717	}
718
719	/* Clean up for the next user */
720	scb->flags = SCB_FREE;
721	hscb->control = 0;
722
723	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
724	splx(opri);
725}
726
727/*
728 * Get a free scb. If there are none, see if we can allocate a new SCB.
729 */
730static __inline struct scb *
731ahcgetscb(struct ahc_softc *ahc)
732{
733	struct scb *scbp;
734	int opri;
735
736	opri = splcam();
737	if ((scbp = SLIST_FIRST(&ahc->scb_data->free_scbs))) {
738		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
739	} else {
740		ahcallocscbs(ahc);
741		scbp = SLIST_FIRST(&ahc->scb_data->free_scbs);
742		if (scbp != NULL)
743			SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
744	}
745
746	splx(opri);
747
748	return (scbp);
749}
750
751char *
752ahc_name(struct ahc_softc *ahc)
753{
754	static char name[10];
755
756	snprintf(name, sizeof(name), "ahc%d", ahc->unit);
757	return (name);
758}
759
760#ifdef  AHC_DEBUG
761static void
762ahc_print_scb(struct scb *scb)
763{
764	int i;
765
766	struct hardware_scb *hscb = scb->hscb;
767
768	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
769	       scb,
770	       hscb->control,
771	       hscb->scsiid,
772	       hscb->lun,
773	       hscb->cdb_len);
774	i = 0;
775	printf("Shared Data: %#02x %#02x %#02x %#02x\n",
776	       hscb->shared_data.cdb[i++],
777	       hscb->shared_data.cdb[i++],
778	       hscb->shared_data.cdb[i++],
779	       hscb->shared_data.cdb[i++]);
780	printf("             %#02x %#02x %#02x %#02x\n",
781	       hscb->shared_data.cdb[i++],
782	       hscb->shared_data.cdb[i++],
783	       hscb->shared_data.cdb[i++],
784	       hscb->shared_data.cdb[i++]);
785	printf("             %#02x %#02x %#02x %#02x\n",
786	       hscb->shared_data.cdb[i++],
787	       hscb->shared_data.cdb[i++],
788	       hscb->shared_data.cdb[i++],
789	       hscb->shared_data.cdb[i++]);
790	printf("        dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
791		hscb->dataptr,
792		hscb->datacnt,
793		hscb->sgptr,
794		hscb->tag);
795	if (scb->sg_count > 0) {
796		for (i = 0; i < scb->sg_count; i++) {
797			printf("sg[%d] - Addr 0x%x : Length %d\n",
798			       i,
799			       scb->sg_list[i].addr,
800			       scb->sg_list[i].len);
801		}
802	}
803}
804#endif
805
806static struct {
807        uint8_t errno;
808	char *errmesg;
809} hard_error[] = {
810	{ ILLHADDR,	"Illegal Host Access" },
 811	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
812	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
813	{ SQPARERR,	"Sequencer Parity Error" },
814	{ DPARERR,	"Data-path Parity Error" },
815	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
816	{ PCIERRSTAT,	"PCI Error detected" },
817	{ CIOPARERR,	"CIOBUS Parity Error" },
818};
819static const int num_errors = sizeof(hard_error)/sizeof(hard_error[0]);
820
821static struct {
822        uint8_t phase;
823        uint8_t mesg_out; /* Message response to parity errors */
824	char *phasemsg;
825} phase_table[] = {
826	{ P_DATAOUT,	MSG_NOOP,		"in Data-out phase"	},
827	{ P_DATAIN,	MSG_INITIATOR_DET_ERR,	"in Data-in phase"	},
828	{ P_COMMAND,	MSG_NOOP,		"in Command phase"	},
829	{ P_MESGOUT,	MSG_NOOP,		"in Message-out phase"	},
830	{ P_STATUS,	MSG_INITIATOR_DET_ERR,	"in Status phase"	},
831	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
832	{ P_BUSFREE,	MSG_NOOP,		"while idle"		},
833	{ 0,		MSG_NOOP,		"in unknown phase"	}
834};
835static const u_int num_phases =
836    (sizeof(phase_table)/sizeof(phase_table[0])) - 1;
837
838/*
839 * Valid SCSIRATE values.  (p. 3-17)
 840 * Provides a mapping of transfer periods in ns to the proper value to
 841 * stick in the SCSIRATE register to use that transfer rate.
842 */
843#define AHC_SYNCRATE_DT		0
844#define AHC_SYNCRATE_ULTRA2	1
845#define AHC_SYNCRATE_ULTRA	3
846#define AHC_SYNCRATE_FAST	6
847static struct ahc_syncrate ahc_syncrates[] = {
848      /* ultra2    fast/ultra  period     rate */
849	{ 0x42,      0x000,      9,      "80.0" },
850	{ 0x03,      0x000,     10,      "40.0" },
851	{ 0x04,      0x000,     11,      "33.0" },
852	{ 0x05,      0x100,     12,      "20.0" },
853	{ 0x06,      0x110,     15,      "16.0" },
854	{ 0x07,      0x120,     18,      "13.4" },
855	{ 0x08,      0x000,     25,      "10.0" },
856	{ 0x19,      0x010,     31,      "8.0"  },
857	{ 0x1a,      0x020,     37,      "6.67" },
858	{ 0x1b,      0x030,     43,      "5.7"  },
859	{ 0x1c,      0x040,     50,      "5.0"  },
860	{ 0x00,      0x050,     56,      "4.4"  },
861	{ 0x00,      0x060,     62,      "4.0"  },
862	{ 0x00,      0x070,     68,      "3.6"  },
863	{ 0x00,      0x000,      0,      NULL   }
864};
865
866void
867ahc_init_probe_config(struct ahc_probe_config *probe_config)
868{
869	probe_config->description = NULL;
870	probe_config->channel = 'A';
871	probe_config->channel_b = 'B';
872	probe_config->chip = AHC_NONE;
873	probe_config->features = AHC_FENONE;
874	probe_config->bugs = AHC_BUGNONE;
875	probe_config->flags = AHC_FNONE;
876}
877
878/*
879 * Allocate a controller structure for a new device and initialize it.
880 */
881struct ahc_softc *
882ahc_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id,
883	  bus_dma_tag_t parent_dmat, struct ahc_probe_config *config,
884	  struct scb_data *scb_data)
885{
886	/*
887	 * find unit and check we have that many defined
888	 */
889	struct  ahc_softc *ahc;
890	size_t	alloc_size;
891	int	i;
892
893	/*
894	 * Allocate a storage area for us.
895	 */
896	if (scb_data == NULL)
897		/*
898		 * We are not sharing SCB space with another controller
899		 * so allocate our own SCB data space.
900		 */
901		alloc_size = sizeof(struct full_ahc_softc);
902	else
903		alloc_size = sizeof(struct ahc_softc);
904	ahc = malloc(alloc_size, M_DEVBUF, M_NOWAIT);
905	if (!ahc) {
906		device_printf(dev, "cannot malloc softc!\n");
907		return NULL;
908	}
909	bzero(ahc, alloc_size);
910	LIST_INIT(&ahc->pending_ccbs);
911	ahc->device = dev;
912	ahc->unit = device_get_unit(dev);
913	ahc->regs_res_type = regs_type;
914	ahc->regs_res_id = regs_id;
915	ahc->regs = regs;
916	ahc->tag = rman_get_bustag(regs);
917	ahc->bsh = rman_get_bushandle(regs);
918	ahc->parent_dmat = parent_dmat;
919	ahc->chip = config->chip;
920	ahc->features = config->features;
921	ahc->bugs = config->bugs;
922	ahc->flags = config->flags;
923	ahc->channel = config->channel;
924	for (i = 0; i < 16; i++)
925		TAILQ_INIT(&ahc->untagged_queues[i]);
926
927	if (scb_data == NULL) {
928		struct full_ahc_softc* full_softc = (struct full_ahc_softc*)ahc;
929		ahc->scb_data = &full_softc->scb_data_storage;
930	} else
931		ahc->scb_data = scb_data;
932
933	ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN;
934	/* The IRQMS bit is only valid on VL and EISA chips */
935	if ((ahc->chip & AHC_PCI) != 0)
936		ahc->unpause &= ~IRQMS;
937	ahc->pause = ahc->unpause | PAUSE;
938	return (ahc);
939}
940
941void
942ahc_free(ahc)
943	struct ahc_softc *ahc;
944{
945	ahcfiniscbdata(ahc);
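	/*
	 * The cases below intentionally fall through so that teardown
	 * proceeds downward from the highest initialization level reached.
	 */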
946	switch (ahc->init_level) {
947	case 3:
948		bus_dmamap_unload(ahc->shared_data_dmat,
949				  ahc->shared_data_dmamap);
950	case 2:
951		bus_dmamem_free(ahc->shared_data_dmat, ahc->qoutfifo,
952				ahc->shared_data_dmamap);
953		bus_dmamap_destroy(ahc->shared_data_dmat,
954				   ahc->shared_data_dmamap);
955	case 1:
956		bus_dma_tag_destroy(ahc->buffer_dmat);
957		break;
958	}
959
960	if (ahc->regs != NULL)
961		bus_release_resource(ahc->device, ahc->regs_res_type,
962				     ahc->regs_res_id, ahc->regs);
963	if (ahc->irq != NULL)
964		bus_release_resource(ahc->device, ahc->irq_res_type,
965				     0, ahc->irq);
966
967	free(ahc, M_DEVBUF);
968	return;
969}
970
971static int
972ahcinitscbdata(struct ahc_softc *ahc)
973{
974	struct scb_data *scb_data;
975	int i;
976
977	scb_data = ahc->scb_data;
978	SLIST_INIT(&scb_data->free_scbs);
979	SLIST_INIT(&scb_data->sg_maps);
980
981	/* Allocate SCB resources */
982	scb_data->scbarray =
983	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX,
984				 M_DEVBUF, M_NOWAIT);
985	if (scb_data->scbarray == NULL)
986		return (ENOMEM);
987	bzero(scb_data->scbarray, sizeof(struct scb) * AHC_SCB_MAX);
988
989	/* Determine the number of hardware SCBs and initialize them */
990
991	scb_data->maxhscbs = ahc_probe_scbs(ahc);
992	/* SCB 0 heads the free list */
993	ahc_outb(ahc, FREE_SCBH, 0);
994	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
995		ahc_outb(ahc, SCBPTR, i);
996
997		/* Clear the control byte. */
998		ahc_outb(ahc, SCB_CONTROL, 0);
999
1000		/* Set the next pointer */
1001		ahc_outb(ahc, SCB_NEXT, i+1);
1002
1003		/* Make the tag number invalid */
1004		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
1005	}
1006
1007	/* Make sure that the last SCB terminates the free list */
1008	ahc_outb(ahc, SCBPTR, i-1);
1009	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
1010
1011	/* Ensure we clear the 0 SCB's control byte. */
1012	ahc_outb(ahc, SCBPTR, 0);
1013	ahc_outb(ahc, SCB_CONTROL, 0);
1014
1015	scb_data->maxhscbs = i;
1016
1017	if (ahc->scb_data->maxhscbs == 0)
1018		panic("%s: No SCB space found", ahc_name(ahc));
1019
1020	/*
1021	 * Create our DMA tags.  These tags define the kinds of device
1022	 * accessible memory allocations and memory mappings we will
1023	 * need to perform during normal operation.
1024	 *
1025	 * Unless we need to further restrict the allocation, we rely
1026	 * on the restrictions of the parent dmat, hence the common
1027	 * use of MAXADDR and MAXSIZE.
1028	 */
1029
1030	/* DMA tag for our hardware scb structures */
1031	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1032			       /*lowaddr*/BUS_SPACE_MAXADDR,
1033			       /*highaddr*/BUS_SPACE_MAXADDR,
1034			       /*filter*/NULL, /*filterarg*/NULL,
1035			       AHC_SCB_MAX * sizeof(struct hardware_scb),
1036			       /*nsegments*/1,
1037			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1038			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
1039		goto error_exit;
1040	}
1041
1042	scb_data->init_level++;
1043
1044	/* Allocation for our hardware SCBs */
1045	if (bus_dmamem_alloc(scb_data->hscb_dmat, (void **)&scb_data->hscbs,
1046			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
1047		goto error_exit;
1048	}
1049
1050	scb_data->init_level++;
1051
1052	/* And permanently map them */
1053	bus_dmamap_load(scb_data->hscb_dmat, scb_data->hscb_dmamap,
1054			scb_data->hscbs,
1055			AHC_SCB_MAX * sizeof(struct hardware_scb),
1056			ahcdmamapcb, &scb_data->hscb_busaddr, /*flags*/0);
1057
1058	scb_data->init_level++;
1059
1060	/* DMA tag for our sense buffers */
1061	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1062			       /*lowaddr*/BUS_SPACE_MAXADDR,
1063			       /*highaddr*/BUS_SPACE_MAXADDR,
1064			       /*filter*/NULL, /*filterarg*/NULL,
1065			       AHC_SCB_MAX * sizeof(struct scsi_sense_data),
1066			       /*nsegments*/1,
1067			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1068			       /*flags*/0, &scb_data->sense_dmat) != 0) {
1069		goto error_exit;
1070	}
1071
1072	scb_data->init_level++;
1073
1074	/* Allocate them */
1075	if (bus_dmamem_alloc(scb_data->sense_dmat, (void **)&scb_data->sense,
1076			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
1077		goto error_exit;
1078	}
1079
1080	scb_data->init_level++;
1081
1082	/* And permanently map them */
1083	bus_dmamap_load(scb_data->sense_dmat, scb_data->sense_dmamap,
1084			scb_data->sense,
1085			AHC_SCB_MAX * sizeof(struct scsi_sense_data),
1086			ahcdmamapcb, &scb_data->sense_busaddr, /*flags*/0);
1087
1088	scb_data->init_level++;
1089
1090	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
1091	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
1092			       /*lowaddr*/BUS_SPACE_MAXADDR,
1093			       /*highaddr*/BUS_SPACE_MAXADDR,
1094			       /*filter*/NULL, /*filterarg*/NULL,
1095			       PAGE_SIZE, /*nsegments*/1,
1096			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
1097			       /*flags*/0, &scb_data->sg_dmat) != 0) {
1098		goto error_exit;
1099	}
1100
1101        scb_data->init_level++;
1102
1103	/* Perform initial SCB allocation */
1104	bzero(scb_data->hscbs, AHC_SCB_MAX * sizeof(struct hardware_scb));
1105	ahcallocscbs(ahc);
1106
1107	if (scb_data->numscbs == 0) {
1108		printf("%s: ahc_init_scb_data - "
1109		       "Unable to allocate initial scbs\n",
1110		       ahc_name(ahc));
1111		goto error_exit;
1112	}
1113
1114	/*
1115         * Note that we were successful
1116         */
1117        return 0;
1118
1119error_exit:
1120
1121	return ENOMEM;
1122}
1123
1124static void
1125ahcfiniscbdata(struct ahc_softc *ahc)
1126{
1127	struct scb_data *scb_data;
1128
1129	scb_data = ahc->scb_data;
1130
1131	switch (scb_data->init_level) {
1132	default:
1133	case 7:
1134	{
1135		struct sg_map_node *sg_map;
1136
1137		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
1138			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
1139			bus_dmamap_unload(scb_data->sg_dmat,
1140					  sg_map->sg_dmamap);
1141			bus_dmamem_free(scb_data->sg_dmat, sg_map->sg_vaddr,
1142					sg_map->sg_dmamap);
1143			free(sg_map, M_DEVBUF);
1144		}
1145		bus_dma_tag_destroy(scb_data->sg_dmat);
1146	}
1147	case 6:
1148		bus_dmamap_unload(scb_data->sense_dmat,
1149				  scb_data->sense_dmamap);
1150	case 5:
1151		bus_dmamem_free(scb_data->sense_dmat, scb_data->sense,
1152				scb_data->sense_dmamap);
1153		bus_dmamap_destroy(scb_data->sense_dmat,
1154				   scb_data->sense_dmamap);
1155	case 4:
1156		bus_dma_tag_destroy(scb_data->sense_dmat);
1157	case 3:
1158		bus_dmamap_unload(scb_data->hscb_dmat, scb_data->hscb_dmamap);
1159	case 2:
1160		bus_dmamem_free(scb_data->hscb_dmat, scb_data->hscbs,
1161				scb_data->hscb_dmamap);
1162		bus_dmamap_destroy(scb_data->hscb_dmat, scb_data->hscb_dmamap);
1163	case 1:
1164		bus_dma_tag_destroy(scb_data->hscb_dmat);
1165		break;
1166	}
1167	if (scb_data->scbarray != NULL)
1168		free(scb_data->scbarray, M_DEVBUF);
1169}
1170
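/*
 * bus_dmamap_load() callback used for our permanent, single segment
 * mappings; it simply records the segment's bus address.
 */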
1171static void
1172ahcdmamapcb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1173{
1174	bus_addr_t *baddr;
1175
1176	baddr = (bus_addr_t *)arg;
1177	*baddr = segs->ds_addr;
1178}
1179
1180int
1181ahc_reset(struct ahc_softc *ahc)
1182{
1183	u_int	sblkctl;
1184	u_int	sxfrctl1;
1185	int	wait;
1186
1187#ifdef AHC_DUMP_SEQ
1188	if (ahc->init_level == 0)
1189		ahc_dumpseq(ahc);
1190#endif
1191
1192	/* Cache STPWEN.  It is cleared by a chip reset */
1193	pause_sequencer(ahc);
1194	sxfrctl1 = ahc_inb(ahc, SXFRCTL1) & STPWEN;
1195	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
1196	/*
1197	 * Ensure that the reset has finished
1198	 */
1199	wait = 1000;
1200	do {
1201		DELAY(1000);
1202	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
1203
1204	if (wait == 0) {
1205		printf("%s: WARNING - Failed chip reset!  "
1206		       "Trying to initialize anyway.\n", ahc_name(ahc));
1207	}
1208	ahc_outb(ahc, HCNTRL, ahc->pause);
1209	/*
1210	 * Reload sxfrctl1 with the cached value of STPWEN
1211	 * to minimize the amount of time our terminators
1212	 * are disabled.  If a BIOS has initialized the chip,
1213	 * then sxfrctl1 will have the correct value.  If
1214	 * not, STPWEN will be false (the value after a POST)
1215	 * and this action will be harmless.
1216	 *
1217	 * We must always initialize STPWEN to 1 before we
1218	 * restore the saved value.  STPWEN is initialized
1219	 * to a tri-state condition which can only be cleared
1220	 * by turning it on.
1221	 */
1222	ahc_outb(ahc, SXFRCTL1, sxfrctl1|STPWEN);
1223	ahc_outb(ahc, SXFRCTL1, sxfrctl1);
1224
1225	/* Determine channel configuration */
1226	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
1227	/* No Twin Channel PCI cards */
1228	if ((ahc->chip & AHC_PCI) != 0)
1229		sblkctl &= ~SELBUSB;
1230	switch (sblkctl) {
1231	case 0:
1232		/* Single Narrow Channel */
1233		break;
1234	case 2:
1235		/* Wide Channel */
1236		ahc->features |= AHC_WIDE;
1237		break;
1238	case 8:
1239		/* Twin Channel */
1240		ahc->features |= AHC_TWIN;
1241		break;
1242	default:
1243		printf(" Unsupported adapter type.  Ignoring\n");
1244		return(-1);
1245	}
1246
1247	return (0);
1248}
1249
1250/*
1251 * Called when we have an active connection to a target on the bus,
1252 * this function finds the nearest syncrate to the input period limited
1253 * by the capabilities of the bus connectivity of the target.
1254 */
1255static struct ahc_syncrate *
1256ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period,
1257			u_int *ppr_options) {
1258	u_int	maxsync;
1259
1260	if ((ahc->features & AHC_ULTRA2) != 0) {
1261		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
1262		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
1263			maxsync = AHC_SYNCRATE_DT;
1264		} else {
1265			maxsync = AHC_SYNCRATE_ULTRA;
1266			/* Can't do DT on an SE bus */
1267			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1268		}
1269	} else if ((ahc->features & AHC_ULTRA) != 0) {
1270		maxsync = AHC_SYNCRATE_ULTRA;
1271	} else {
1272		maxsync = AHC_SYNCRATE_FAST;
1273	}
1274	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
1275}
1276
1277/*
1278 * Look up the valid period to SCSIRATE conversion in our table.
1279 * Return the period and offset that should be sent to the target
1280 * if this was the beginning of an SDTR.
1281 */
1282static struct ahc_syncrate *
1283ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
1284		  u_int *ppr_options, u_int maxsync)
1285{
1286	struct ahc_syncrate *syncrate;
1287
1288	if ((ahc->features & AHC_DT) == 0)
1289		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1290
1291	for (syncrate = &ahc_syncrates[maxsync];
1292	     syncrate->rate != NULL;
1293	     syncrate++) {
1294
1295		/*
1296		 * The Ultra2 table doesn't go as low
1297		 * as for the Fast/Ultra cards.
1298		 */
1299		if ((ahc->features & AHC_ULTRA2) != 0
1300		 && (syncrate->sxfr_u2 == 0))
1301			break;
1302
1303		/* Skip any DT entries if DT is not available */
1304		if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
1305		 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
1306			continue;
1307
1308		if (*period <= syncrate->period) {
1309			/*
1310			 * When responding to a target that requests
1311			 * sync, the requested rate may fall between
1312			 * two rates that we can output, but still be
1313			 * a rate that we can receive.  Because of this,
1314			 * we want to respond to the target with
1315			 * the same rate that it sent to us even
1316			 * if the period we use to send data to it
1317			 * is lower.  Only lower the response period
1318			 * if we must.
1319			 */
1320			if (syncrate == &ahc_syncrates[maxsync])
1321				*period = syncrate->period;
1322
1323			/*
1324			 * At some speeds, we only support
1325			 * ST transfers.
1326			 */
1327		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
1328				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1329			break;
1330		}
1331	}
1332
1333	if ((*period == 0)
1334	 || (syncrate->rate == NULL)
1335	 || ((ahc->features & AHC_ULTRA2) != 0
1336	  && (syncrate->sxfr_u2 == 0))) {
1337		/* Use asynchronous transfers. */
1338		*period = 0;
1339		syncrate = NULL;
1340		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1341	}
1342	return (syncrate);
1343}
1344
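/*
 * Given a SCSIRATE register value, look up the transfer period it
 * corresponds to in our rate table.  Returns 0 for async transfers.
 */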
1345static u_int
1346ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1347{
1348	struct ahc_syncrate *syncrate;
1349
1350	if ((ahc->features & AHC_ULTRA2) != 0)
1351		scsirate &= SXFR_ULTRA2;
1352	else
1353		scsirate &= SXFR;
1354
1355	syncrate = &ahc_syncrates[maxsync];
1356	while (syncrate->rate != NULL) {
1357
1358		if ((ahc->features & AHC_ULTRA2) != 0) {
1359			if (syncrate->sxfr_u2 == 0)
1360				break;
1361			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1362				return (syncrate->period);
1363		} else if (scsirate == (syncrate->sxfr & SXFR)) {
1364				return (syncrate->period);
1365		}
1366		syncrate++;
1367	}
1368	return (0); /* async */
1369}
1370
1371static void
1372ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate,
1373		    u_int *offset, int wide)
1374{
1375	u_int maxoffset;
1376
1377	/* Limit offset to what we can do */
1378	if (syncrate == NULL) {
1379		maxoffset = 0;
1380	} else if ((ahc->features & AHC_ULTRA2) != 0) {
1381		maxoffset = MAX_OFFSET_ULTRA2;
1382	} else {
1383		if (wide)
1384			maxoffset = MAX_OFFSET_16BIT;
1385		else
1386			maxoffset = MAX_OFFSET_8BIT;
1387	}
1388	*offset = MIN(*offset, maxoffset);
1389}
1390
1391
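/*
 * Truncate a requested bus width to the widest transfer this
 * controller supports.
 */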
1392static void
1393ahc_validate_width(struct ahc_softc *ahc, u_int *bus_width)
1394{
1395	switch (*bus_width) {
1396	default:
1397		if (ahc->features & AHC_WIDE) {
1398			/* Respond Wide */
1399			*bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1400			break;
1401		}
1402		/* FALLTHROUGH */
1403	case MSG_EXT_WDTR_BUS_8_BIT:
1404		*bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1405		break;
1406	}
1407}
1408
1409static void
1410ahc_update_target_msg_request(struct ahc_softc *ahc,
1411			      struct ahc_devinfo *devinfo,
1412			      struct ahc_initiator_tinfo *tinfo,
1413			      int force, int paused)
1414{
1415	u_int targ_msg_req_orig;
1416
1417	targ_msg_req_orig = ahc->targ_msg_req;
1418	if (tinfo->current.period != tinfo->goal.period
1419	 || tinfo->current.width != tinfo->goal.width
1420	 || tinfo->current.offset != tinfo->goal.offset
1421	 || (force
1422	  && (tinfo->goal.period != 0
1423	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT)))
1424		ahc->targ_msg_req |= devinfo->target_mask;
1425	else
1426		ahc->targ_msg_req &= ~devinfo->target_mask;
1427
1428	if (ahc->targ_msg_req != targ_msg_req_orig) {
1429		/* Update the message request bit for this target */
1430		if (!paused)
1431			pause_sequencer(ahc);
1432
1433		ahc_outb(ahc, TARGET_MSG_REQUEST,
1434			 ahc->targ_msg_req & 0xFF);
1435		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
1436			 (ahc->targ_msg_req >> 8) & 0xFF);
1437
1438		if (!paused)
1439			unpause_sequencer(ahc);
1440	}
1441}
1442
1443static int
1444ahc_create_path(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1445		     struct cam_path **path)
1446{
1447	path_id_t path_id;
1448
1449	if (devinfo->channel == 'B')
1450		path_id = cam_sim_path(ahc->sim_b);
1451	else
1452		path_id = cam_sim_path(ahc->sim);
1453
1454	return (xpt_create_path(path, /*periph*/NULL,
1455				path_id, devinfo->target,
1456				devinfo->lun));
1457}
1458
1459static void
1460ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1461		 struct cam_path *path, struct ahc_syncrate *syncrate,
1462		 u_int period, u_int offset, u_int ppr_options,
1463		 u_int type, int paused)
1464{
1465	struct	ahc_initiator_tinfo *tinfo;
1466	struct	tmode_tstate *tstate;
1467	u_int	old_period;
1468	u_int	old_offset;
1469	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1470
1471	if (syncrate == NULL) {
1472		period = 0;
1473		offset = 0;
1474	}
1475
1476	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1477				    devinfo->target, &tstate);
1478	old_period = tinfo->current.period;
1479	old_offset = tinfo->current.offset;
1480
1481	if ((type & AHC_TRANS_CUR) != 0
1482	 && (old_period != period || old_offset != offset)) {
1483		struct	cam_path *path2;
1484		u_int	scsirate;
1485
1486		scsirate = tinfo->scsirate;
1487		if ((ahc->features & AHC_ULTRA2) != 0) {
1488
1489			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
1490			if (syncrate != NULL) {
1491				scsirate |= syncrate->sxfr_u2;
1492				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
1493					scsirate |= ENABLE_CRC;
1494				else
1495					scsirate |= SINGLE_EDGE;
1496			}
1497			if (active)
1498				ahc_outb(ahc, SCSIOFFSET, offset);
1499		} else {
1500
1501			scsirate &= ~(SXFR|SOFS);
1502			/*
1503			 * Ensure Ultra mode is set properly for
1504			 * this target.
1505			 */
1506			tstate->ultraenb &= ~devinfo->target_mask;
1507			if (syncrate != NULL) {
1508				if (syncrate->sxfr & ULTRA_SXFR) {
1509					tstate->ultraenb |=
1510						devinfo->target_mask;
1511				}
1512				scsirate |= syncrate->sxfr & SXFR;
1513				scsirate |= offset & SOFS;
1514			}
1515			if (active) {
1516				u_int sxfrctl0;
1517
1518				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
1519				sxfrctl0 &= ~FAST20;
1520				if (tstate->ultraenb & devinfo->target_mask)
1521					sxfrctl0 |= FAST20;
1522				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
1523			}
1524		}
1525		if (active)
1526			ahc_outb(ahc, SCSIRATE, scsirate);
1527
1528		tinfo->scsirate = scsirate;
1529		tinfo->current.period = period;
1530		tinfo->current.offset = offset;
1531		tinfo->current.ppr_options = ppr_options;
1532
1533		/* Update the syncrates in any pending scbs */
1534		ahc_update_pending_syncrates(ahc);
1535
1536		/*
1537		 * If possible, tell the SCSI layer about the
1538		 * new transfer parameters.
1539		 */
1540		/* If possible, update the XPT's notion of our transfer rate */
1541		path2 = NULL;
1542		if (path == NULL) {
1543			int error;
1544
1545			error = ahc_create_path(ahc, devinfo, &path2);
1546			if (error == CAM_REQ_CMP)
1547				path = path2;
1548			else
1549				path2 = NULL;
1550		}
1551
1552		if (path != NULL) {
1553			struct	ccb_trans_settings neg;
1554
1555			neg.flags = CCB_TRANS_CURRENT_SETTINGS;
1556			neg.sync_period = period;
1557			neg.sync_offset = offset;
1558			neg.valid = CCB_TRANS_SYNC_RATE_VALID
1559				  | CCB_TRANS_SYNC_OFFSET_VALID;
1560			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1561			xpt_async(AC_TRANSFER_NEG, path, &neg);
1562		}
1563
1564		if (path2 != NULL)
1565			xpt_free_path(path2);
1566
1567		if (bootverbose) {
1568			if (offset != 0) {
1569				printf("%s: target %d synchronous at %sMHz%s, "
1570				       "offset = 0x%x\n", ahc_name(ahc),
1571				       devinfo->target, syncrate->rate,
1572				       (ppr_options & MSG_EXT_PPR_DT_REQ)
1573				       ? " DT" : "", offset);
1574			} else {
1575				printf("%s: target %d using "
1576				       "asynchronous transfers\n",
1577				       ahc_name(ahc), devinfo->target);
1578			}
1579		}
1580	}
1581
1582	if ((type & AHC_TRANS_GOAL) != 0) {
1583		tinfo->goal.period = period;
1584		tinfo->goal.offset = offset;
1585		tinfo->goal.ppr_options = ppr_options;
1586	}
1587
1588	if ((type & AHC_TRANS_USER) != 0) {
1589		tinfo->user.period = period;
1590		tinfo->user.offset = offset;
1591		tinfo->user.ppr_options = ppr_options;
1592	}
1593
1594	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1595				      /*force*/FALSE,
1596				      paused);
1597}
1598
1599static void
1600ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1601	      struct cam_path *path, u_int width, u_int type, int paused)
1602{
1603	struct ahc_initiator_tinfo *tinfo;
1604	struct tmode_tstate *tstate;
1605	u_int  oldwidth;
1606	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
1607
1608	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1609				    devinfo->target, &tstate);
1610	oldwidth = tinfo->current.width;
1611
1612	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
1613		struct  cam_path *path2;
1614		u_int	scsirate;
1615
1616		scsirate =  tinfo->scsirate;
1617		scsirate &= ~WIDEXFER;
1618		if (width == MSG_EXT_WDTR_BUS_16_BIT)
1619			scsirate |= WIDEXFER;
1620
1621		tinfo->scsirate = scsirate;
1622
1623		if (active)
1624			ahc_outb(ahc, SCSIRATE, scsirate);
1625
1626		tinfo->current.width = width;
1627
1628		/* If possible, update the XPT's notion of our transfer rate */
1629		path2 = NULL;
1630		if (path == NULL) {
1631			int error;
1632
1633			error = ahc_create_path(ahc, devinfo, &path2);
1634			if (error == CAM_REQ_CMP)
1635				path = path2;
1636			else
1637				path2 = NULL;
1638		}
1639
1640		if (path != NULL) {
1641			struct	ccb_trans_settings neg;
1642
1643			neg.flags = CCB_TRANS_CURRENT_SETTINGS;
1644			neg.bus_width = width;
1645			neg.valid = CCB_TRANS_BUS_WIDTH_VALID;
1646			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1647			xpt_async(AC_TRANSFER_NEG, path, &neg);
1648		}
1649
1650		if (path2 != NULL)
1651			xpt_free_path(path2);
1652
1653		if (bootverbose) {
1654			printf("%s: target %d using %dbit transfers\n",
1655			       ahc_name(ahc), devinfo->target,
1656			       8 * (0x01 << width));
1657		}
1658	}
1659	if ((type & AHC_TRANS_GOAL) != 0)
1660		tinfo->goal.width = width;
1661	if ((type & AHC_TRANS_USER) != 0)
1662		tinfo->user.width = width;
1663
1664	ahc_update_target_msg_request(ahc, devinfo, tinfo,
1665				      /*force*/FALSE, paused);
1666}
1667
1668static void
1669ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
1670{
1671	struct ahc_initiator_tinfo *tinfo;
1672	struct tmode_tstate *tstate;
1673
1674	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1675				    devinfo->target, &tstate);
1676
1677	if (enable)
1678		tstate->tagenable |= devinfo->target_mask;
1679	else
1680		tstate->tagenable &= ~devinfo->target_mask;
1681}
1682
1683/*
1684 * Attach all the sub-devices we can find
1685 */
1686int
1687ahc_attach(struct ahc_softc *ahc)
1688{
1689	struct ccb_setasync csa;
1690	struct cam_devq *devq;
1691	int bus_id;
1692	int bus_id2;
1693	struct cam_sim *sim;
1694	struct cam_sim *sim2;
1695	struct cam_path *path;
1696	struct cam_path *path2;
1697	int count;
1698	int s;
1699	int error;
1700
1701	count = 0;
1702	sim = NULL;
1703	sim2 = NULL;
1704
1705	s = splcam();
1706	/* Hook up our interrupt handler */
1707	if ((error = bus_setup_intr(ahc->device, ahc->irq, INTR_TYPE_CAM,
1708				    ahc_intr, ahc, &ahc->ih)) != 0) {
1709		device_printf(ahc->device, "bus_setup_intr() failed: %d\n",
1710			      error);
1711		goto fail;
1712	}
1713
1714	/*
1715	 * Attach secondary channel first if the user has
1716	 * declared it the primary channel.
1717	 */
1718	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
1719		bus_id = 1;
1720		bus_id2 = 0;
1721	} else {
1722		bus_id = 0;
1723		bus_id2 = 1;
1724	}
1725
1726	/*
1727	 * Create the device queue for our SIM(s).
1728	 */
1729	devq = cam_simq_alloc(AHC_SCB_MAX);
1730	if (devq == NULL)
1731		goto fail;
1732
1733	/*
1734	 * Construct our first channel SIM entry
1735	 */
1736	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, ahc->unit,
1737			    1, AHC_SCB_MAX, devq);
1738	if (sim == NULL) {
1739		cam_simq_free(devq);
1740		goto fail;
1741	}
1742
1743	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
1744		cam_sim_free(sim, /*free_devq*/TRUE);
1745		sim = NULL;
1746		goto fail;
1747	}
1748
1749	if (xpt_create_path(&path, /*periph*/NULL,
1750			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
1751			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1752		xpt_bus_deregister(cam_sim_path(sim));
1753		cam_sim_free(sim, /*free_devq*/TRUE);
1754		sim = NULL;
1755		goto fail;
1756	}
1757
1758	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
1759	csa.ccb_h.func_code = XPT_SASYNC_CB;
1760	csa.event_enable = AC_LOST_DEVICE;
1761	csa.callback = ahc_async;
1762	csa.callback_arg = sim;
1763	xpt_action((union ccb *)&csa);
1764	count++;
1765
1766	if (ahc->features & AHC_TWIN) {
1767		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
1768				    ahc, ahc->unit, 1,
1769				    AHC_SCB_MAX, devq);
1770
1771		if (sim2 == NULL) {
1772			printf("ahc_attach: Unable to attach second "
1773			       "bus due to resource shortage");
1774			goto fail;
1775		}
1776
1777		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
1778			printf("ahc_attach: Unable to attach second "
1779			       "bus due to resource shortage");
1780			/*
1781			 * We do not want to destroy the device queue
1782			 * because the first bus is using it.
1783			 */
1784			cam_sim_free(sim2, /*free_devq*/FALSE);
1785			goto fail;
1786		}
1787
1788		if (xpt_create_path(&path2, /*periph*/NULL,
1789				    cam_sim_path(sim2),
1790				    CAM_TARGET_WILDCARD,
1791				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1792			xpt_bus_deregister(cam_sim_path(sim2));
1793			cam_sim_free(sim2, /*free_devq*/FALSE);
1794			sim2 = NULL;
1795			goto fail;
1796		}
1797		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
1798		csa.ccb_h.func_code = XPT_SASYNC_CB;
1799		csa.event_enable = AC_LOST_DEVICE;
1800		csa.callback = ahc_async;
1801		csa.callback_arg = sim2;
1802		xpt_action((union ccb *)&csa);
1803		count++;
1804	}
1805
1806fail:
1807	if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) {
1808		ahc->sim_b = sim;
1809		ahc->path_b = path;
1810		ahc->sim = sim2;
1811		ahc->path = path2;
1812	} else {
1813		ahc->sim = sim;
1814		ahc->path = path;
1815		ahc->sim_b = sim2;
1816		ahc->path_b = path2;
1817	}
1818	splx(s);
1819	return (count);
1820}
1821
1822#if UNUSED
1823static void
1824ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
1825		struct scb *scb)
1826{
1827	role_t	role;
1828	int	our_id;
1829
1830	if (scb->ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1831		our_id = scb->ccb->ccb_h.target_id;
1832		role = ROLE_TARGET;
1833	} else {
1834		our_id = SCB_GET_CHANNEL(scb) == 'B' ? ahc->our_id_b : ahc->our_id;
1835		role = ROLE_INITIATOR;
1836	}
1837	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
1838			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(scb), role);
1839}
1840#endif
1841
1842static void
1843ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1844{
1845	u_int	saved_scsiid;
1846	role_t	role;
1847	int	our_id;
1848
1849	if (ahc_inb(ahc, SSTAT0) & TARGET)
1850		role = ROLE_TARGET;
1851	else
1852		role = ROLE_INITIATOR;
1853
1854	if (role == ROLE_TARGET
1855	 && (ahc->features & AHC_MULTI_TID) != 0
1856	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
1857		/* We were selected, so pull our id from TARGIDIN */
1858		our_id = ahc_inb(ahc, TARGIDIN) & OID;
1859	} else if ((ahc->features & AHC_ULTRA2) != 0)
1860		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
1861	else
1862		our_id = ahc_inb(ahc, SCSIID) & OID;
1863
1864	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1865	ahc_compile_devinfo(devinfo,
1866			    our_id,
1867			    SCSIID_TARGET(ahc, saved_scsiid),
1868			    ahc_inb(ahc, SAVED_LUN),
1869			    SCSIID_CHANNEL(ahc, saved_scsiid),
1870			    role);
1871}
1872
1873static void
1874ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
1875		    u_int lun, char channel, role_t role)
1876{
1877	devinfo->our_scsiid = our_id;
1878	devinfo->target = target;
1879	devinfo->lun = lun;
1880	devinfo->target_offset = target;
1881	devinfo->channel = channel;
1882	devinfo->role = role;
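	/*
	 * Channel B targets occupy offsets 8-15, so a single 16-bit
	 * target_mask can describe both buses of a twin-channel adapter.
	 */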
1883	if (channel == 'B')
1884		devinfo->target_offset += 8;
1885	devinfo->target_mask = (0x01 << devinfo->target_offset);
1886}
1887
1888/*
1889 * Catch an interrupt from the adapter
1890 */
1891void
1892ahc_intr(void *arg)
1893{
1894	struct	ahc_softc *ahc;
1895	u_int	intstat;
1896
1897	ahc = (struct ahc_softc *)arg;
1898
1899	intstat = ahc_inb(ahc, INTSTAT);
1900
1901	/*
1902	 * Any interrupts to process?
1903	 */
1904#if NPCI > 0
1905	if ((intstat & INT_PEND) == 0) {
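		/*
		 * Not our interrupt.  On PCI adapters sharing an interrupt
		 * line, a long run of unsolicited interrupts may hide a
		 * latched PCI error, so check the ERROR register once
		 * every 500 of them.
		 */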
1906		if ((ahc->chip & AHC_PCI) != 0
1907		 && (ahc->unsolicited_ints > 500)) {
1908			if ((ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
1909				ahc_pci_intr(ahc);
1910			ahc->unsolicited_ints = 0;
1911		} else {
1912			ahc->unsolicited_ints++;
1913		}
1914		return;
1915	} else {
1916		ahc->unsolicited_ints = 0;
1917	}
1918#else
1919	if ((intstat & INT_PEND) == 0)
1920		return;
1921#endif
1922
1923	if (intstat & CMDCMPLT) {
1924		ahc_outb(ahc, CLRINT, CLRCMDINT);
1925		ahc_run_qoutfifo(ahc);
1926		if ((ahc->flags & AHC_TARGETMODE) != 0)
1927			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
1928	}
1929	if (intstat & BRKADRINT) {
1930		/*
1931		 * We upset the sequencer :-(
1932		 * Lookup the error message
1933		 */
1934		int i, error, num_errors;
1935
1936		error = ahc_inb(ahc, ERROR);
1937		num_errors =  sizeof(hard_error)/sizeof(hard_error[0]);
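		/*
		 * ERROR latches a single error bit; shift it down to
		 * convert the bit position into an index into hard_error[].
		 */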
1938		for (i = 0; error != 1 && i < num_errors; i++)
1939			error >>= 1;
1940		panic("%s: brkadrint, %s at seqaddr = 0x%x\n",
1941		      ahc_name(ahc), hard_error[i].errmesg,
1942		      ahc_inb(ahc, SEQADDR0) |
1943		      (ahc_inb(ahc, SEQADDR1) << 8));
1944
1945		/* Tell everyone that this HBA is no longer available */
1946		ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
1947			       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
1948			       CAM_NO_HBA);
1949	}
1950
1951	if ((intstat & (SEQINT|SCSIINT)) != 0)
1952		ahc_pause_bug_fix(ahc);
1953
1954	if ((intstat & SEQINT) != 0)
1955		ahc_handle_seqint(ahc, intstat);
1956
1957	if ((intstat & SCSIINT) != 0)
1958		ahc_handle_scsiint(ahc, intstat);
1959}
1960
1961static struct tmode_tstate *
1962ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
1963{
1964	struct tmode_tstate *master_tstate;
1965	struct tmode_tstate *tstate;
1966	int i, s;
1967
1968	master_tstate = ahc->enabled_targets[ahc->our_id];
1969	if (channel == 'B') {
1970		scsi_id += 8;
1971		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
1972	}
1973	if (ahc->enabled_targets[scsi_id] != NULL
1974	 && ahc->enabled_targets[scsi_id] != master_tstate)
1975		panic("%s: ahc_alloc_tstate - Target already allocated",
1976		      ahc_name(ahc));
1977	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
1978	if (tstate == NULL)
1979		return (NULL);
1980
1981	/*
1982	 * If we have allocated a master tstate, copy user settings from
1983	 * the master tstate (taken from SRAM or the EEPROM) for this
1984	 * channel, but reset our current and goal settings to async/narrow
1985	 * until an initiator talks to us.
1986	 */
1987	if (master_tstate != NULL) {
1988		bcopy(master_tstate, tstate, sizeof(*tstate));
1989		bzero(tstate->enabled_luns, sizeof(tstate->enabled_luns));
1990		tstate->ultraenb = 0;
1991		for (i = 0; i < 16; i++) {
1992			bzero(&tstate->transinfo[i].current,
1993			      sizeof(tstate->transinfo[i].current));
1994			bzero(&tstate->transinfo[i].goal,
1995			      sizeof(tstate->transinfo[i].goal));
1996		}
1997	} else
1998		bzero(tstate, sizeof(*tstate));
1999	s = splcam();
2000	ahc->enabled_targets[scsi_id] = tstate;
2001	splx(s);
2002	return (tstate);
2003}
2004
2005static void
2006ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
2007{
2008	struct tmode_tstate *tstate;
2009
2010	/* Don't clean up the entry for our initiator role */
2011	if ((ahc->flags & AHC_INITIATORMODE) != 0
2012	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
2013	  || (channel == 'A' && scsi_id == ahc->our_id))
2014	 && force == FALSE)
2015		return;
2016
2017	if (channel == 'B')
2018		scsi_id += 8;
2019	tstate = ahc->enabled_targets[scsi_id];
2020	if (tstate != NULL)
2021		free(tstate, M_DEVBUF);
2022	ahc->enabled_targets[scsi_id] = NULL;
2023}
2024
2025static void
2026ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
2027{
2028	struct	   tmode_tstate *tstate;
2029	struct	   tmode_lstate *lstate;
2030	struct	   ccb_en_lun *cel;
2031	cam_status status;
2032	u_int	   target;
2033	u_int	   lun;
2034	u_int	   target_mask;
2035	char	   channel;
2036	int	   s;
2037
2038	status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
2039				     /* notfound_failure*/FALSE);
2040
2041	if (status != CAM_REQ_CMP) {
2042		ccb->ccb_h.status = status;
2043		return;
2044	}
2045
2046	cel = &ccb->cel;
2047	target = ccb->ccb_h.target_id;
2048	lun = ccb->ccb_h.target_lun;
2049	channel = SIM_CHANNEL(ahc, sim);
2050	target_mask = 0x01 << target;
2051	if (channel == 'B')
2052		target_mask <<= 8;
2053
2054	if (cel->enable != 0) {
2055		u_int scsiseq;
2056
2057		/* Are we already enabled?? */
2058		if (lstate != NULL) {
2059			xpt_print_path(ccb->ccb_h.path);
2060			printf("Lun already enabled\n");
2061			ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
2062			return;
2063		}
2064
2065		if (cel->grp6_len != 0
2066		 || cel->grp7_len != 0) {
2067			/*
2068			 * Don't (yet?) support vendor
2069			 * specific commands.
2070			 */
2071			ccb->ccb_h.status = CAM_REQ_INVALID;
2072			printf("Non-zero Group Codes\n");
2073			return;
2074		}
2075
2076		/*
2077		 * Seems to be okay.
2078		 * Setup our data structures.
2079		 */
2080		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
2081			tstate = ahc_alloc_tstate(ahc, target, channel);
2082			if (tstate == NULL) {
2083				xpt_print_path(ccb->ccb_h.path);
2084				printf("Couldn't allocate tstate\n");
2085				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2086				return;
2087			}
2088		}
2089		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
2090		if (lstate == NULL) {
2091			xpt_print_path(ccb->ccb_h.path);
2092			printf("Couldn't allocate lstate\n");
2093			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2094			return;
2095		}
2096		bzero(lstate, sizeof(*lstate));
2097		status = xpt_create_path(&lstate->path, /*periph*/NULL,
2098					 xpt_path_path_id(ccb->ccb_h.path),
2099					 xpt_path_target_id(ccb->ccb_h.path),
2100					 xpt_path_lun_id(ccb->ccb_h.path));
2101		if (status != CAM_REQ_CMP) {
2102			free(lstate, M_DEVBUF);
2103			xpt_print_path(ccb->ccb_h.path);
2104			printf("Couldn't allocate path\n");
2105			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2106			return;
2107		}
2108		SLIST_INIT(&lstate->accept_tios);
2109		SLIST_INIT(&lstate->immed_notifies);
2110		s = splcam();
2111		pause_sequencer(ahc);
2112		if (target != CAM_TARGET_WILDCARD) {
2113			tstate->enabled_luns[lun] = lstate;
2114			ahc->enabled_luns++;
2115
2116			if ((ahc->features & AHC_MULTI_TID) != 0) {
2117				u_int targid_mask;
2118
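				/*
				 * Add this target to the 16-bit TARGID mask
				 * (low byte first) so the chip will answer
				 * selections addressed to it.
				 */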
2119				targid_mask = ahc_inb(ahc, TARGID)
2120					    | (ahc_inb(ahc, TARGID + 1) << 8);
2121
2122				targid_mask |= target_mask;
2123				ahc_outb(ahc, TARGID, targid_mask);
2124				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
2125
2126				ahc_update_scsiid(ahc, targid_mask);
2127			} else {
2128				u_int our_id;
2129				char  channel;
2130
2131				channel = SIM_CHANNEL(ahc, sim);
2132				our_id = SIM_SCSI_ID(ahc, sim);
2133
2134				/*
2135				 * This can only happen if selections
2136				 * are not enabled
2137				 */
2138				if (target != our_id) {
2139					u_int sblkctl;
2140					char  cur_channel;
2141					int   swap;
2142
2143					sblkctl = ahc_inb(ahc, SBLKCTL);
2144					cur_channel = (sblkctl & SELBUSB)
2145						    ? 'B' : 'A';
2146					if ((ahc->features & AHC_TWIN) == 0)
2147						cur_channel = 'A';
2148					swap = cur_channel != channel;
2149					if (channel == 'A')
2150						ahc->our_id = target;
2151					else
2152						ahc->our_id_b = target;
2153
2154					if (swap)
2155						ahc_outb(ahc, SBLKCTL,
2156							 sblkctl ^ SELBUSB);
2157
2158					ahc_outb(ahc, SCSIID, target);
2159
2160					if (swap)
2161						ahc_outb(ahc, SBLKCTL, sblkctl);
2162				}
2163			}
2164		} else
2165			ahc->black_hole = lstate;
2166		/* Allow select-in operations */
2167		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
2168			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
2169			scsiseq |= ENSELI;
2170			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
2171			scsiseq = ahc_inb(ahc, SCSISEQ);
2172			scsiseq |= ENSELI;
2173			ahc_outb(ahc, SCSISEQ, scsiseq);
2174		}
2175		unpause_sequencer(ahc);
2176		splx(s);
2177		ccb->ccb_h.status = CAM_REQ_CMP;
2178		xpt_print_path(ccb->ccb_h.path);
2179		printf("Lun now enabled for target mode\n");
2180	} else {
2181		struct ccb_hdr *elm;
2182		int i, empty;
2183
2184		if (lstate == NULL) {
2185			ccb->ccb_h.status = CAM_LUN_INVALID;
2186			return;
2187		}
2188
2189		s = splcam();
2190		ccb->ccb_h.status = CAM_REQ_CMP;
2191		LIST_FOREACH(elm, &ahc->pending_ccbs, sim_links.le) {
2192			if (elm->func_code == XPT_CONT_TARGET_IO
2193			 && !xpt_path_comp(elm->path, ccb->ccb_h.path)){
2194				printf("CTIO pending\n");
2195				ccb->ccb_h.status = CAM_REQ_INVALID;
2196				splx(s);
2197				return;
2198			}
2199		}
2200
2201		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
2202			printf("ATIOs pending\n");
2203			ccb->ccb_h.status = CAM_REQ_INVALID;
2204		}
2205
2206		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
2207			printf("INOTs pending\n");
2208			ccb->ccb_h.status = CAM_REQ_INVALID;
2209		}
2210
2211		if (ccb->ccb_h.status != CAM_REQ_CMP) {
2212			splx(s);
2213			return;
2214		}
2215
2216		xpt_print_path(ccb->ccb_h.path);
2217		printf("Target mode disabled\n");
2218		xpt_free_path(lstate->path);
2219		free(lstate, M_DEVBUF);
2220
2221		pause_sequencer(ahc);
2222		/* Can we clean up the target too? */
2223		if (target != CAM_TARGET_WILDCARD) {
2224			tstate->enabled_luns[lun] = NULL;
2225			ahc->enabled_luns--;
2226			for (empty = 1, i = 0; i < 8; i++)
2227				if (tstate->enabled_luns[i] != NULL) {
2228					empty = 0;
2229					break;
2230				}
2231
2232			if (empty) {
2233				ahc_free_tstate(ahc, target, channel,
2234						/*force*/FALSE);
2235				if (ahc->features & AHC_MULTI_TID) {
2236					u_int targid_mask;
2237
2238					targid_mask = ahc_inb(ahc, TARGID)
2239						    | (ahc_inb(ahc, TARGID + 1)
2240						       << 8);
2241
2242					targid_mask &= ~target_mask;
2243					ahc_outb(ahc, TARGID, targid_mask);
2244					ahc_outb(ahc, TARGID+1,
2245					 	 (targid_mask >> 8));
2246					ahc_update_scsiid(ahc, targid_mask);
2247				}
2248			}
2249		} else {
2250
2251			ahc->black_hole = NULL;
2252
2253			/*
2254			 * We can't allow selections without
2255			 * our black hole device.
2256			 */
2257			empty = TRUE;
2258		}
2259		if (ahc->enabled_luns == 0) {
2260			/* Disallow select-in */
2261			u_int scsiseq;
2262
2263			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
2264			scsiseq &= ~ENSELI;
2265			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
2266			scsiseq = ahc_inb(ahc, SCSISEQ);
2267			scsiseq &= ~ENSELI;
2268			ahc_outb(ahc, SCSISEQ, scsiseq);
2269		}
2270		unpause_sequencer(ahc);
2271		splx(s);
2272	}
2273}
2274
2275static void
2276ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
2277{
2278	u_int scsiid_mask;
2279	u_int scsiid;
2280
2281	if ((ahc->features & AHC_MULTI_TID) == 0)
2282		panic("ahc_update_scsiid called on non-multitid unit\n");
2283
2284	/*
2285	 * Since we will rely on the TARGID mask
2286	 * for selection enables, ensure that OID
2287	 * in SCSIID is not set to some other ID
2288	 * that we don't want to allow selections on.
2289	 */
2290	if ((ahc->features & AHC_ULTRA2) != 0)
2291		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
2292	else
2293		scsiid = ahc_inb(ahc, SCSIID);
2294	scsiid_mask = 0x1 << (scsiid & OID);
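	/*
	 * If OID is no longer one of the enabled target IDs, switch it
	 * to the lowest enabled ID, falling back to our initiator ID
	 * when the mask is empty.
	 */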
2295	if ((targid_mask & scsiid_mask) == 0) {
2296		u_int our_id;
2297
2298		/* ffs counts from 1 */
2299		our_id = ffs(targid_mask);
2300		if (our_id == 0)
2301			our_id = ahc->our_id;
2302		else
2303			our_id--;
2304		scsiid &= TID;
2305		scsiid |= our_id;
2306	}
2307	if ((ahc->features & AHC_ULTRA2) != 0)
2308		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
2309	else
2310		ahc_outb(ahc, SCSIID, scsiid);
2311}
2312
2313static int
2314ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
2315{
2316	struct	  tmode_tstate *tstate;
2317	struct	  tmode_lstate *lstate;
2318	struct	  ccb_accept_tio *atio;
2319	uint8_t *byte;
2320	int	  initiator;
2321	int	  target;
2322	int	  lun;
2323
2324	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
2325	target = SCSIID_OUR_ID(cmd->scsiid);
2326	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);
2327
2328	byte = cmd->bytes;
2329	tstate = ahc->enabled_targets[target];
2330	lstate = NULL;
2331	if (tstate != NULL)
2332		lstate = tstate->enabled_luns[lun];
2333
2334	/*
2335	 * Commands for disabled luns go to the black hole driver.
2336	 */
2337	if (lstate == NULL)
2338		lstate = ahc->black_hole;
2339
2340	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
2341	if (atio == NULL) {
2342		ahc->flags |= AHC_TQINFIFO_BLOCKED;
2343		/*
2344		 * Wait for more ATIOs from the peripheral driver for this lun.
2345		 */
2346		return (1);
2347	} else
2348		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
2349#if 0
2350	printf("Incoming command from %d for %d:%d%s\n",
2351	       initiator, target, lun,
2352	       lstate == ahc->black_hole ? "(Black Holed)" : "");
2353#endif
2354	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
2355
2356	if (lstate == ahc->black_hole) {
2357		/* Fill in the wildcards */
2358		atio->ccb_h.target_id = target;
2359		atio->ccb_h.target_lun = lun;
2360	}
2361
2362	/*
2363	 * Package it up and send it off to
2364	 * whoever has this lun enabled.
2365	 */
2366	atio->sense_len = 0;
2367	atio->init_id = initiator;
2368	if (byte[0] != 0xFF) {
2369		/* Tag was included */
2370		atio->tag_action = *byte++;
2371		atio->tag_id = *byte++;
2372		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
2373	} else {
2374		atio->ccb_h.flags = 0;
2375	}
2376	byte++;
2377
2378	/* Okay.  Now determine the cdb size based on the command code */
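	/*
	 * SCSI CDB group codes: group 0 is 6 bytes, groups 1 and 2 are
	 * 10 bytes, group 4 is 16 bytes, and group 5 is 12 bytes.
	 * Group 3 is reserved and groups 6/7 are vendor specific.
	 */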
2379	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
2380	case 0:
2381		atio->cdb_len = 6;
2382		break;
2383	case 1:
2384	case 2:
2385		atio->cdb_len = 10;
2386		break;
2387	case 4:
2388		atio->cdb_len = 16;
2389		break;
2390	case 5:
2391		atio->cdb_len = 12;
2392		break;
2393	case 3:
2394	default:
2395		/* Only copy the opcode. */
2396		atio->cdb_len = 1;
2397		printf("Reserved or VU command code type encountered\n");
2398		break;
2399	}
2400	bcopy(byte, atio->cdb_io.cdb_bytes, atio->cdb_len);
2401
2402	atio->ccb_h.status |= CAM_CDB_RECVD;
2403
2404	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
2405		/*
2406		 * We weren't allowed to disconnect.
2407		 * We're hanging on the bus until a
2408		 * continue target I/O comes in response
2409		 * to this accept tio.
2410		 */
2411#if 0
2412		printf("Received Immediate Command %d:%d:%d - %p\n",
2413		       initiator, target, lun, ahc->pending_device);
2414#endif
2415		ahc->pending_device = lstate;
2416		ahc_freeze_ccb((union ccb *)atio);
2417		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
2418	}
2419	xpt_done((union ccb*)atio);
2420	return (0);
2421}
2422
2423static void
2424ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
2425{
2426	struct scb *scb;
2427	struct ahc_devinfo devinfo;
2428
2429	ahc_fetch_devinfo(ahc, &devinfo);
2430
2431	/*
2432	 * Clear the upper byte that holds SEQINT status
2433	 * codes and clear the SEQINT bit. We will unpause
2434	 * the sequencer, if appropriate, after servicing
2435	 * the request.
2436	 */
2437	ahc_outb(ahc, CLRINT, CLRSEQINT);
2438	switch (intstat & SEQINT_MASK) {
2439	case BAD_STATUS:
2440	{
2441		u_int  scb_index;
2442		struct hardware_scb *hscb;
2443		struct ccb_scsiio *csio;
2444		/*
2445		 * The sequencer will notify us when a command
2446		 * has an error that would be of interest to
2447		 * the kernel.  This allows us to leave the sequencer
2448		 * running in the common case of command completes
2449		 * running in the common case of commands completing
2450		 * without error.  The sequencer will already have
2451		 * dma'd the SCB back up to us, so we can reference
2452		 * the in-kernel copy directly.
2453		scb_index = ahc_inb(ahc, SCB_TAG);
2454		scb = &ahc->scb_data->scbarray[scb_index];
2455
2456		/*
2457		 * Set the default return value to 0 (don't
2458		 * send sense).  The sense code will change
2459		 * this if needed.
2460		 */
2461		ahc_outb(ahc, RETURN_1, 0);
2462		if (!(scb_index < ahc->scb_data->numscbs
2463		   && (scb->flags & SCB_ACTIVE) != 0)) {
2464			printf("%s:%c:%d: ahc_intr - referenced scb "
2465			       "not valid during seqint 0x%x scb(%d)\n",
2466			       ahc_name(ahc), devinfo.channel,
2467			       devinfo.target, intstat, scb_index);
2468			goto unpause;
2469		}
2470
2471		hscb = scb->hscb;
2472
2473		/* Don't want to clobber the original sense code */
2474		if ((scb->flags & SCB_SENSE) != 0) {
2475			/*
2476			 * Clear the SCB_SENSE Flag and have
2477			 * the sequencer do a normal command
2478			 * complete.
2479			 */
2480			scb->flags &= ~SCB_SENSE;
2481			ahcsetccbstatus(scb->ccb, CAM_AUTOSENSE_FAIL);
2482			break;
2483		}
2484		ahcsetccbstatus(scb->ccb, CAM_SCSI_STATUS_ERROR);
2485		/* Freeze the queue until the client sees the error. */
2486		ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
2487		ahc_freeze_ccb(scb->ccb);
2488		csio = &scb->ccb->csio;
2489		csio->scsi_status = hscb->shared_data.status.scsi_status;
2490		switch (csio->scsi_status) {
2491		case SCSI_STATUS_OK:
2492			printf("%s: Interrupted for status of 0???\n",
2493			       ahc_name(ahc));
2494			break;
2495		case SCSI_STATUS_CMD_TERMINATED:
2496		case SCSI_STATUS_CHECK_COND:
2497#ifdef AHC_DEBUG
2498			if (ahc_debug & AHC_SHOWSENSE) {
2499				xpt_print_path(csio->ccb_h.path);
2500				printf("SCB %d: requests Check Status\n",
2501				       scb->hscb->tag);
2502			}
2503#endif
2504			if ((csio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
2505				struct ahc_dma_seg *sg;
2506				struct scsi_sense *sc;
2507				struct ahc_initiator_tinfo *targ_info;
2508				struct tmode_tstate *tstate;
2509				struct ahc_transinfo *tinfo;
2510
2511				targ_info =
2512				    ahc_fetch_transinfo(ahc,
2513							devinfo.channel,
2514							devinfo.our_scsiid,
2515							devinfo.target,
2516							&tstate);
2517				tinfo = &targ_info->current;
2518				sg = scb->sg_list;
2519				sc = (struct scsi_sense *)
2520				     (&hscb->shared_data.cdb);
2521				/*
2522				 * Save off the residual if there is one.
2523				 */
2524				if (ahc_check_residual(scb))
2525					ahc_calc_residual(scb);
2526				else
2527					scb->ccb->csio.resid = 0;
2528
2529#ifdef AHC_DEBUG
2530				if (ahc_debug & AHC_SHOWSENSE) {
2531					xpt_print_path(csio->ccb_h.path);
2532					printf("Sending Sense\n");
2533				}
2534#endif
2535				sg->addr = ahc->scb_data->sense_busaddr
2536				   + (hscb->tag*sizeof(struct scsi_sense_data));
2537				sg->len = MIN(sizeof(struct scsi_sense_data),
2538					      csio->sense_len);
2539				sg->len |= AHC_DMA_LAST_SEG;
2540
2541				sc->opcode = REQUEST_SENSE;
2542				sc->byte2 = 0;
2543				if (tinfo->protocol_version <= SCSI_REV_2
2544				 && SCB_GET_LUN(scb) < 8)
2545					sc->byte2 = SCB_GET_LUN(scb) << 5;
2546				sc->unused[0] = 0;
2547				sc->unused[1] = 0;
2548				sc->length = sg->len;
2549				sc->control = 0;
2550
2551				/*
2552				 * Would be nice to preserve DISCENB here,
2553				 * but due to the way we manage busy targets,
2554				 * we can't.
2555				 */
2556				hscb->control = 0;
2557
2558				/*
2559				 * This request sense could be because
2560				 * the device lost power or in some other
2561				 * way has lost our transfer negotiations.
2562				 * Renegotiate if appropriate.  Unit attention
2563				 * errors will be reported before any data
2564				 * phases occur.
2565				 */
2566				if (scb->ccb->csio.resid
2567				 == scb->ccb->csio.dxfer_len) {
2568					ahc_update_target_msg_request(ahc,
2569							      &devinfo,
2570							      targ_info,
2571							      /*force*/TRUE,
2572							      /*paused*/TRUE);
2573				}
2574				hscb->cdb_len = sizeof(*sc);
2575				hscb->dataptr = sg->addr;
2576				hscb->datacnt = sg->len;
2577				hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
2578				scb->sg_count = 1;
2579				scb->flags |= SCB_SENSE;
2580				ahc_outb(ahc, RETURN_1, SEND_SENSE);
2581
2582				/*
2583				 * Ensure we have enough time to actually
2584				 * retrieve the sense.
2585				 */
2586				untimeout(ahc_timeout, (caddr_t)scb,
2587					  scb->ccb->ccb_h.timeout_ch);
2588				scb->ccb->ccb_h.timeout_ch =
2589				    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
2590			}
2591			break;
2592		default:
2593			break;
2594		}
2595		break;
2596	}
2597	case NO_MATCH:
2598	{
2599		/* Ensure we don't leave the selection hardware on */
2600		ahc_outb(ahc, SCSISEQ,
2601			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
2602
2603		printf("%s:%c:%d: no active SCB for reconnecting "
2604		       "target - issuing BUS DEVICE RESET\n",
2605		       ahc_name(ahc), devinfo.channel, devinfo.target);
2606		printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
2607		       "ARG_1 == 0x%x ARG_2 = 0x%x, SEQ_FLAGS == 0x%x\n",
2608		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
2609		       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ARG_2),
2610		       ahc_inb(ahc, SEQ_FLAGS));
2611		printf("SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
2612		       "SCB_TAG == 0x%x\n",
2613		       ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN),
2614		       ahc_inb(ahc, SCB_TAG));
2615		ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
2616		ahc->msgout_len = 1;
2617		ahc->msgout_index = 0;
2618		ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
2619		ahc_outb(ahc, MSG_OUT, HOST_MSG);
2620		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, LASTPHASE) | ATNO);
2621		break;
2622	}
2623	case SEND_REJECT:
2624	{
2625		u_int rejbyte = ahc_inb(ahc, ACCUM);
2626		printf("%s:%c:%d: Warning - unknown message received from "
2627		       "target (0x%x).  Rejecting\n",
2628		       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
2629		break;
2630	}
2631	case NO_IDENT:
2632	{
2633		/*
2634		 * The reconnecting target either did not send an identify
2635		 * message, or did, but we didn't find an SCB to match and
2636		 * before it could respond to our ATN/abort, it hit a dataphase.
2637		 * The only safe thing to do is to blow it away with a bus
2638		 * reset.
2639		 */
2640		int found;
2641
2642		printf("%s:%c:%d: Target did not send an IDENTIFY message. "
2643		       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
2644		       ahc_name(ahc), devinfo.channel, devinfo.target,
2645		       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
2646		found = ahc_reset_channel(ahc, devinfo.channel,
2647					  /*initiate reset*/TRUE);
2648		printf("%s: Issued Channel %c Bus Reset. "
2649		       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
2650		       found);
2651		return;
2652	}
2653	case IGN_WIDE_RES:
2654		ahc_handle_ign_wide_residue(ahc, &devinfo);
2655		break;
2656	case BAD_PHASE:
2657	{
2658		u_int lastphase;
2659
2660		lastphase = ahc_inb(ahc, LASTPHASE);
2661		if (lastphase == P_BUSFREE) {
2662			printf("%s:%c:%d: Missed busfree.  Curphase = 0x%x\n",
2663			       ahc_name(ahc), devinfo.channel, devinfo.target,
2664			       ahc_inb(ahc, SCSISIGI));
2665			restart_sequencer(ahc);
2666			return;
2667		} else {
2668			printf("%s:%c:%d: unknown scsi bus phase %x.  "
2669			       "Attempting to continue\n",
2670			       ahc_name(ahc), devinfo.channel, devinfo.target,
2671			       ahc_inb(ahc, SCSISIGI));
2672		}
2673		break;
2674	}
2675	case HOST_MSG_LOOP:
2676	{
2677		/*
2678		 * The sequencer has encountered a message phase
2679		 * that requires host assistance for completion.
2680		 * While handling the message phase(s), we will be
2681		 * notified by the sequencer after each byte is
2682	 * transferred so we can track bus phase changes.
2683		 *
2684		 * If this is the first time we've seen a HOST_MSG_LOOP
2685		 * interrupt, initialize the state of the host message
2686		 * loop.
2687		 */
2688		if (ahc->msg_type == MSG_TYPE_NONE) {
2689			u_int bus_phase;
2690
2691			bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2692			if (bus_phase != P_MESGIN
2693			 && bus_phase != P_MESGOUT) {
2694				printf("ahc_intr: HOST_MSG_LOOP bad "
2695				       "phase 0x%x\n",
2696				      bus_phase);
2697				/*
2698				 * Probably transitioned to bus free before
2699				 * we got here.  Just punt the message.
2700				 */
2701				ahc_clear_intstat(ahc);
2702				restart_sequencer(ahc);
2703				return;
2704			}
2705
2706			if (devinfo.role == ROLE_INITIATOR) {
2707				struct scb *scb;
2708				u_int scb_index;
2709
2710				scb_index = ahc_inb(ahc, SCB_TAG);
2711				scb = &ahc->scb_data->scbarray[scb_index];
2712
2713				if (bus_phase == P_MESGOUT)
2714					ahc_setup_initiator_msgout(ahc,
2715								   &devinfo,
2716								   scb);
2717				else {
2718					ahc->msg_type =
2719					    MSG_TYPE_INITIATOR_MSGIN;
2720					ahc->msgin_index = 0;
2721				}
2722			} else {
2723				if (bus_phase == P_MESGOUT) {
2724					ahc->msg_type =
2725					    MSG_TYPE_TARGET_MSGOUT;
2726					ahc->msgin_index = 0;
2727				} else
2728					/* XXX Ever executed??? */
2729					ahc_setup_target_msgin(ahc, &devinfo);
2730			}
2731		}
2732
2733		/* Pass a NULL path so that handlers generate their own */
2734		ahc_handle_message_phase(ahc, /*path*/NULL);
2735		break;
2736	}
2737	case PERR_DETECTED:
2738	{
2739		/*
2740		 * If we've cleared the parity error interrupt
2741		 * but the sequencer still believes that SCSIPERR
2742		 * is true, it must be that the parity error is
2743		 * for the currently presented byte on the bus,
2744		 * and we are not in a phase (data-in) where we will
2745		 * eventually ack this byte.  Ack the byte and
2746		 * throw it away in the hope that the target will
2747		 * take us to message out to deliver the appropriate
2748		 * error message.
2749		 */
2750		if ((intstat & SCSIINT) == 0
2751		 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
2752			u_int curphase;
2753
2754			/*
2755			 * The hardware will only let you ack bytes
2756			 * if the expected phase in SCSISIGO matches
2757			 * the current phase.  Make sure this is
2758			 * currently the case.
2759			 */
2760			curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2761			ahc_outb(ahc, LASTPHASE, curphase);
2762			ahc_outb(ahc, SCSISIGO, curphase);
2763			ahc_inb(ahc, SCSIDATL);
2764		}
2765		break;
2766	}
2767	case DATA_OVERRUN:
2768	{
2769		/*
2770		 * When the sequencer detects an overrun, it
2771		 * places the controller in "BITBUCKET" mode
2772		 * and allows the target to complete its transfer.
2773		 * Unfortunately, none of the counters get updated
2774		 * when the controller is in this mode, so we have
2775		 * no way of knowing how large the overrun was.
2776		 */
2777		u_int scbindex = ahc_inb(ahc, SCB_TAG);
2778		u_int lastphase = ahc_inb(ahc, LASTPHASE);
2779		u_int i;
2780
2781		scb = &ahc->scb_data->scbarray[scbindex];
2782		for (i = 0; i < num_phases; i++) {
2783			if (lastphase == phase_table[i].phase)
2784				break;
2785		}
2786		xpt_print_path(scb->ccb->ccb_h.path);
2787		printf("data overrun detected %s."
2788		       "  Tag == 0x%x.\n",
2789		       phase_table[i].phasemsg,
2790		       scb->hscb->tag);
2791		xpt_print_path(scb->ccb->ccb_h.path);
2792		printf("%s seen Data Phase.  Length = %d.  NumSGs = %d.\n",
2793		       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
2794		       scb->ccb->csio.dxfer_len, scb->sg_count);
2795		if (scb->sg_count > 0) {
2796			for (i = 0; i < scb->sg_count; i++) {
2797				printf("sg[%d] - Addr 0x%x : Length %d\n",
2798				       i,
2799				       scb->sg_list[i].addr,
2800				       scb->sg_list[i].len & AHC_SG_LEN_MASK);
2801			}
2802		}
2803		/*
2804		 * Set this and it will take effect when the
2805		 * target does a command complete.
2806		 */
2807		ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
2808		ahcsetccbstatus(scb->ccb, CAM_DATA_RUN_ERR);
2809		ahc_freeze_ccb(scb->ccb);
2810		break;
2811	}
2812	case TRACEPOINT:
2813	{
2814		printf("SAVED_SCSIID %x, SAVED_LUN %x, SCBPTR %x\n",
2815		       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
2816		       ahc_inb(ahc, SCBPTR));
2817#if 0
2818		printf("%s: SCB_DATAPTR = %x, SCB_DATACNT = %x\n",
2819		       ahc_name(ahc),
2820		       ahc_inb(ahc, SCB_DATAPTR)
2821		    | (ahc_inb(ahc, SCB_DATAPTR + 1) << 8)
2822		    | (ahc_inb(ahc, SCB_DATAPTR + 2) << 16)
2823		    | (ahc_inb(ahc, SCB_DATAPTR + 3) << 24),
2824		       ahc_inb(ahc, SCB_DATACNT)
2825		    | (ahc_inb(ahc, SCB_DATACNT + 1) << 8)
2826		    | (ahc_inb(ahc, SCB_DATACNT + 2) << 16)
2827		    | (ahc_inb(ahc, SCB_DATACNT + 3) << 24));
2828		printf("SCSIRATE = %x\n", ahc_inb(ahc, SCSIRATE));
2829		printf("SG_CACHEPTR = %x\n", ahc_inb(ahc, SINDEX));
2830		printf("DFCNTRL = %x, DFSTATUS = %x\n",
2831		       ahc_inb(ahc, DFCNTRL),
2832		       ahc_inb(ahc, DFSTATUS));
2833		if ((ahc->features & AHC_CMD_CHAN) != 0) {
2834			printf("CCHADDR = 0x%x\n",
2835			       ahc_inb(ahc, CCHADDR)
2836			     | (ahc_inb(ahc, CCHADDR + 1) << 8)
2837			     | (ahc_inb(ahc, CCHADDR + 2) << 16)
2838			     | (ahc_inb(ahc, CCHADDR + 3) << 24));
2839		} else {
2840			printf("HADDR = 0x%x\n",
2841			       ahc_inb(ahc, HADDR)
2842			     | (ahc_inb(ahc, HADDR + 1) << 8)
2843			     | (ahc_inb(ahc, HADDR + 2) << 16)
2844			     | (ahc_inb(ahc, HADDR + 3) << 24));
2845		}
2846
2847#endif
2848		break;
2849	}
2850	case TRACEPOINT2:
2851	{
2852		printf("SINDEX = %x\n", ahc_inb(ahc, SINDEX));
2853		printf("SCSIRATE = %x\n", ahc_inb(ahc, SCSIRATE));
2854#if 0
2855		printf("SCB_RESIDUAL_SGPTR = %x, SCB_RESIDUAL_DATACNT = %x\n",
2856		       ahc_inb(ahc, SCB_RESIDUAL_SGPTR)
2857		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
2858		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
2859		    | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24),
2860		       ahc_inb(ahc, SCB_RESIDUAL_DATACNT)
2861		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
2862		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
2863		    | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 3) << 24));
2864		printf("DATA_COUNT_ODD = %x\n", ahc_inb(ahc, DATA_COUNT_ODD));
2865		printf("SINDEX = %x\n", ahc_inb(ahc, SINDEX));
2866		printf("SCB_SGPTR %x, SCB_RESIDUAL_SGPTR %x\n",
2867		       ahc_inb(ahc, SCB_SGPTR),
2868		       ahc_inb(ahc, SCB_RESIDUAL_SGPTR));
2869		printf("SAVED_SCSIID %x, SAVED_LUN %d, "
2870		       "DISCONNECTED_SCBH %d\n",
2871		       ahc_inb(ahc, SAVED_SCSIID),
2872		       ahc_inb(ahc, SAVED_LUN),
2873		       ahc_inb(ahc, DISCONNECTED_SCBH));
2874		int i;
2875
2876		if (ahc->unit != 1)
2877			break;
2878		for (i = 0; i < 32;) {
2879			printf("0x%x 0x%x 0x%x 0x%x\n",
2880			       ahc_inb(ahc, SCB_CONTROL + i),
2881			       ahc_inb(ahc, SCB_CONTROL + i + 1),
2882			       ahc_inb(ahc, SCB_CONTROL + i + 2),
2883			       ahc_inb(ahc, SCB_CONTROL + i + 3));
2884			i += 4;
2885		}
2886#endif
2887#if 0
2888		printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1));
2889		printf("SSTAT0 == 0x%x\n", ahc_inb(ahc, SSTAT0));
2890		printf(", SCSISIGI == 0x%x\n", ahc_inb(ahc, SCSISIGI));
2891		printf("TRACEPOINT: CCHCNT = %d, SG_COUNT = %d\n",
2892		       ahc_inb(ahc, CCHCNT), ahc_inb(ahc, SG_COUNT));
2893		printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG));
2894		printf("TRACEPOINT1: CCHADDR = %d, CCHCNT = %d, SCBPTR = %d\n",
2895		       ahc_inb(ahc, CCHADDR)
2896		    | (ahc_inb(ahc, CCHADDR+1) << 8)
2897		    | (ahc_inb(ahc, CCHADDR+2) << 16)
2898		    | (ahc_inb(ahc, CCHADDR+3) << 24),
2899		       ahc_inb(ahc, CCHCNT)
2900		    | (ahc_inb(ahc, CCHCNT+1) << 8)
2901		    | (ahc_inb(ahc, CCHCNT+2) << 16),
2902		       ahc_inb(ahc, SCBPTR));
2903		printf("TRACEPOINT: WAITING_SCBH = %d\n", ahc_inb(ahc, WAITING_SCBH));
2904		printf("TRACEPOINT: SCB_TAG = %d\n", ahc_inb(ahc, SCB_TAG));
2905#endif
2906		break;
2907	}
2908	default:
2909		printf("ahc_intr: seqint, "
2910		       "intstat == 0x%x, scsisigi = 0x%x\n",
2911		       intstat, ahc_inb(ahc, SCSISIGI));
2912		break;
2913	}
2914
2915unpause:
2916	/*
2917	 *  The sequencer is paused immediately on
2918	 *  a SEQINT, so we should restart it when
2919	 *  we're done.
2920	 */
2921	unpause_sequencer(ahc);
2922}
2923
2924static void
2925ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
2926{
2927	u_int	scb_index;
2928	u_int	status;
2929	struct	scb *scb;
2930	char	cur_channel;
2931	char	intr_channel;
2932
2933	if ((ahc->features & AHC_TWIN) != 0
2934	 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
2935		cur_channel = 'B';
2936	else
2937		cur_channel = 'A';
2938	intr_channel = cur_channel;
2939
2940	status = ahc_inb(ahc, SSTAT1);
2941	if (status == 0) {
2942		if ((ahc->features & AHC_TWIN) != 0) {
2943			/* Try the other channel */
2944		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
2945			status = ahc_inb(ahc, SSTAT1);
2946		 	ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
2947			intr_channel = (cur_channel == 'A') ? 'B' : 'A';
2948		}
2949		if (status == 0) {
2950			printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
2951			return;
2952		}
2953	}
2954
2955	scb_index = ahc_inb(ahc, SCB_TAG);
2956	if (scb_index < ahc->scb_data->numscbs) {
2957		scb = &ahc->scb_data->scbarray[scb_index];
2958		if ((scb->flags & SCB_ACTIVE) == 0
2959		 || (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
2960			scb = NULL;
2961	} else
2962		scb = NULL;
2963
2964	if ((status & SCSIRSTI) != 0) {
2965		printf("%s: Someone reset channel %c\n",
2966			ahc_name(ahc), intr_channel);
2967		ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE);
2968	} else if ((status & SCSIPERR) != 0) {
2969		/*
2970		 * Determine the bus phase and queue an appropriate message.
2971		 * SCSIPERR is latched true as soon as a parity error
2972		 * occurs.  If the sequencer acked the transfer that
2973		 * caused the parity error and the currently presented
2974		 * transfer on the bus has correct parity, SCSIPERR will
2975		 * be cleared by CLRSCSIPERR.  Use this to determine if
2976		 * we should look at the last phase the sequencer recorded,
2977		 * or the current phase presented on the bus.
2978		 */
2979		u_int mesg_out;
2980		u_int curphase;
2981		u_int errorphase;
2982		u_int lastphase;
2983		u_int i;
2984
2985		lastphase = ahc_inb(ahc, LASTPHASE);
2986		curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
2987		ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
2988		/*
2989		 * For all phases save DATA, the sequencer won't
2990		 * automatically ack a byte that has a parity error
2991		 * in it.  So the only way that the current phase
2992		 * could be 'data-in' is if the parity error is for
2993		 * an already acked byte in the data phase.  During
2994		 * synchronous data-in transfers, we may actually
2995		 * ack bytes before latching the current phase in
2996		 * LASTPHASE, leading to the discrepancy between
2997		 * curphase and lastphase.
2998		 */
2999		if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
3000		 || curphase == P_DATAIN)
3001			errorphase = curphase;
3002		else
3003			errorphase = lastphase;
3004
3005		for (i = 0; i < num_phases; i++) {
3006			if (errorphase == phase_table[i].phase)
3007				break;
3008		}
3009		mesg_out = phase_table[i].mesg_out;
3010		if (scb != NULL)
3011			xpt_print_path(scb->ccb->ccb_h.path);
3012		else
3013			printf("%s:%c:%d: ", ahc_name(ahc),
3014			       intr_channel,
3015			       SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
3016
3017		printf("parity error detected %s. "
3018		       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
3019		       phase_table[i].phasemsg,
3020		       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
3021		       ahc_inb(ahc, SCSIRATE));
3022
3023		/*
3024		 * We've set the hardware to assert ATN if we
3025		 * get a parity error on "in" phases, so all we
3026		 * need to do is stuff the message buffer with
3027		 * the appropriate message.  "In" phases have set
3028		 * mesg_out to something other than MSG_NOOP.
3029		 */
3030		if (mesg_out != MSG_NOOP) {
3031			if (ahc->msg_type != MSG_TYPE_NONE)
3032				ahc->send_msg_perror = TRUE;
3033			else
3034				ahc_outb(ahc, MSG_OUT, mesg_out);
3035		}
3036		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3037		unpause_sequencer(ahc);
3038	} else if ((status & BUSFREE) != 0
3039		&& (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
3040		/*
3041		 * First look at what phase we were last in.
3042		 * If it's message out, chances are pretty good
3043		 * that the busfree was in response to one of
3044		 * our abort requests.
3045		 */
3046		u_int lastphase = ahc_inb(ahc, LASTPHASE);
3047		u_int saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
3048		u_int saved_lun = ahc_inb(ahc, SAVED_LUN);
3049		u_int target = SCSIID_TARGET(ahc, saved_scsiid);
3050		u_int initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
3051		char channel = SCSIID_CHANNEL(ahc, saved_scsiid);
3052		int printerror = 1;
3053
3054		ahc_outb(ahc, SCSISEQ,
3055			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
3056		if (lastphase == P_MESGOUT) {
3057			u_int message;
3058			u_int tag;
3059
3060			message = ahc->msgout_buf[ahc->msgout_index - 1];
3061			tag = SCB_LIST_NULL;
3062			switch (message) {
3063			case MSG_ABORT_TAG:
3064				tag = scb->hscb->tag;
3065				/* FALLTHROUGH */
3066			case MSG_ABORT:
3067				xpt_print_path(scb->ccb->ccb_h.path);
3068				printf("SCB %d - Abort %s Completed.\n",
3069				       scb->hscb->tag, tag == SCB_LIST_NULL ?
3070				       "" : "Tag");
3071				ahc_abort_scbs(ahc, target, channel,
3072					       saved_lun, tag,
3073					       ROLE_INITIATOR,
3074					       CAM_REQ_ABORTED);
3075				printerror = 0;
3076				break;
3077			case MSG_BUS_DEV_RESET:
3078			{
3079				struct ahc_devinfo devinfo;
3080
3081				/*
3082				 * Don't mark the user's request for this BDR
3083				 * as completing with CAM_BDR_SENT.  CAM3
3084				 * specifies CAM_REQ_CMP.
3085				 */
3086				if (scb != NULL
3087				 && scb->ccb->ccb_h.func_code == XPT_RESET_DEV
3088				 && ahc_match_scb(ahc, scb, target, channel,
3089						  saved_lun,
3090						  SCB_LIST_NULL,
3091						  ROLE_INITIATOR)) {
3092					ahcsetccbstatus(scb->ccb, CAM_REQ_CMP);
3093				}
3094				ahc_compile_devinfo(&devinfo,
3095						    initiator_role_id,
3096						    target,
3097						    saved_lun,
3098						    channel,
3099						    ROLE_INITIATOR);
3100				ahc_handle_devreset(ahc, &devinfo,
3101						    CAM_BDR_SENT, AC_SENT_BDR,
3102						    "Bus Device Reset",
3103						    /*verbose_level*/0);
3104				printerror = 0;
3105				break;
3106			}
3107			default:
3108				break;
3109			}
3110		}
3111		if (printerror != 0) {
3112			u_int i;
3113
3114			if (scb != NULL) {
3115				u_int tag;
3116
3117				if ((scb->hscb->control & TAG_ENB) != 0)
3118					tag = scb->hscb->tag;
3119				else
3120					tag = SCB_LIST_NULL;
3121				ahc_abort_scbs(ahc, target, channel,
3122					       SCB_GET_LUN(scb), tag,
3123					       ROLE_INITIATOR,
3124					       CAM_UNEXP_BUSFREE);
3125				xpt_print_path(scb->ccb->ccb_h.path);
3126			} else {
3127				/*
3128				 * We had not fully identified this connection,
3129				 * so we cannot abort anything.
3130				 */
3131				printf("%s: ", ahc_name(ahc));
3132			}
3133			for (i = 0; i < num_phases; i++) {
3134				if (lastphase == phase_table[i].phase)
3135					break;
3136			}
3137			printf("Unexpected busfree %s\n"
3138			       "SEQADDR == 0x%x\n",
3139			       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
3140				| (ahc_inb(ahc, SEQADDR1) << 8));
3141		}
3142		ahc_clear_msg_state(ahc);
3143		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
3144		ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
3145		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3146		restart_sequencer(ahc);
3147	} else if ((status & SELTO) != 0) {
3148		u_int scbptr;
3149
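		/*
		 * The selection that timed out is at the head of the
		 * waiting list; point SCBPTR at it so we can recover
		 * the tag of the victim SCB.
		 */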
3150		scbptr = ahc_inb(ahc, WAITING_SCBH);
3151		ahc_outb(ahc, SCBPTR, scbptr);
3152		scb_index = ahc_inb(ahc, SCB_TAG);
3153
3154		if (scb_index < ahc->scb_data->numscbs) {
3155			scb = &ahc->scb_data->scbarray[scb_index];
3156			if ((scb->flags & SCB_ACTIVE) == 0)
3157				scb = NULL;
3158		} else
3159			scb = NULL;
3160
3161		if (scb == NULL) {
3162			printf("%s: ahc_intr - referenced scb not "
3163			       "valid during SELTO scb(%d, %d)\n",
3164			       ahc_name(ahc), scbptr, scb_index);
3165		} else {
3166			ahcsetccbstatus(scb->ccb, CAM_SEL_TIMEOUT);
3167			ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
3168		}
3169		/* Stop the selection */
3170		ahc_outb(ahc, SCSISEQ, 0);
3171
3172		/* No more pending messages */
3173		ahc_clear_msg_state(ahc);
3174
3175		/*
3176		 * Although the driver does not care about the
3177		 * 'Selection in Progress' status bit, the busy
3178		 * LED does.  SELINGO is only cleared by a successful
3179		 * selection, so we must manually clear it to ensure
3180		 * the LED turns off just in case no future successful
3181		 * selections occur (e.g. no devices on the bus).
3182		 */
3183		ahc_outb(ahc, CLRSINT0, CLRSELINGO);
3184
3185		/* Clear interrupt state */
3186		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
3187		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3188		restart_sequencer(ahc);
3189	} else {
3190		xpt_print_path(scb->ccb->ccb_h.path);
3191		printf("Unknown SCSIINT. Status = 0x%x\n", status);
3192		ahc_outb(ahc, CLRSINT1, status);
3193		ahc_outb(ahc, CLRINT, CLRSCSIINT);
3194		unpause_sequencer(ahc);
3195	}
3196}
3197
3198static void
3199ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3200{
3201	/*
3202	 * We need to initiate transfer negotiations.
3203	 * If our current and goal settings are identical,
3204	 * we want to renegotiate due to a check condition.
3205	 */
3206	struct	ahc_initiator_tinfo *tinfo;
3207	struct	tmode_tstate *tstate;
3208	struct	ahc_syncrate *rate;
3209	int	dowide;
3210	int	dosync;
3211	int	doppr;
3212	int	use_ppr;
3213	u_int	period;
3214	u_int	ppr_options;
3215	u_int	offset;
3216
3217	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
3218				    devinfo->target, &tstate);
3219	dowide = tinfo->current.width != tinfo->goal.width;
3220	dosync = tinfo->current.period != tinfo->goal.period;
3221	doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;
3222
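	/*
	 * Current and goal already agree, so this is a forced
	 * renegotiation (e.g. after a check condition); renegotiate
	 * any goal setting that is not the narrow/async default.
	 */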
3223	if (!dowide && !dosync && !doppr) {
3224		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
3225		dosync = tinfo->goal.period != 0;
3226		doppr = tinfo->goal.ppr_options != 0;
3227	}
3228
3229	if (!dowide && !dosync && !doppr) {
3230		panic("ahc_intr: AWAITING_MSG for negotiation, "
3231		      "but no negotiation needed\n");
3232	}
3233
3234	use_ppr = (tinfo->current.transport_version >= 3) || doppr;
3235	if (use_ppr) {
3236		ahc_construct_ppr(ahc, tinfo->goal.period, tinfo->goal.offset,
3237				  tinfo->goal.width, tinfo->goal.ppr_options);
3238	} else  if (dowide) {
3239		ahc_construct_wdtr(ahc, tinfo->goal.width);
3240	} else if (dosync) {
3241
3242		period = tinfo->goal.period;
3243		ppr_options = 0;
3244		rate = ahc_devlimited_syncrate(ahc, &period, &ppr_options);
3245		offset = tinfo->goal.offset;
3246		ahc_validate_offset(ahc, rate, &offset,
3247				    tinfo->current.width);
3248		ahc_construct_sdtr(ahc, period, offset);
3249	}
3250}
3251
3252static void
3253ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
3254			   struct scb *scb)
3255{
3256	/*
3257	 * To facilitate adding multiple messages together,
3258	 * each routine should increment the index and len
3259	 * variables instead of setting them explicitly.
3260	 */
3261	ahc->msgout_index = 0;
3262	ahc->msgout_len = 0;
3263
3264	if ((scb->flags & SCB_DEVICE_RESET) == 0
3265	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
3266		u_int identify_msg;
3267
3268		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
3269		if ((scb->hscb->control & DISCENB) != 0)
3270			identify_msg |= MSG_IDENTIFY_DISCFLAG;
3271		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
3272		ahc->msgout_len++;
3273
3274		if ((scb->hscb->control & TAG_ENB) != 0) {
3275			ahc->msgout_buf[ahc->msgout_index++] =
3276			    scb->ccb->csio.tag_action;
3277			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
3278			ahc->msgout_len += 2;
3279		}
3280	}
3281
3282	if (scb->flags & SCB_DEVICE_RESET) {
3283		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
3284		ahc->msgout_len++;
3285		xpt_print_path(scb->ccb->ccb_h.path);
3286		printf("Bus Device Reset Message Sent\n");
3287	} else if ((scb->flags & SCB_ABORT) != 0) {
3288		if ((scb->hscb->control & TAG_ENB) != 0)
3289			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
3290		else
3291			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
3292		ahc->msgout_len++;
3293		xpt_print_path(scb->ccb->ccb_h.path);
3294		printf("Abort Message Sent\n");
3295	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
3296		|| (scb->flags & SCB_NEGOTIATE) != 0) {
3297		ahc_build_transfer_msg(ahc, devinfo);
3298	} else {
3299		printf("ahc_intr: AWAITING_MSG for an SCB that "
3300		       "does not have a waiting message\n");
3301		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
3302		       devinfo->target_mask);
3303		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
3304		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
3305		      ahc_inb(ahc, MSG_OUT), scb->flags);
3306	}
3307
3308	/*
3309	 * Clear the MK_MESSAGE flag from the SCB so we aren't
3310	 * asked to send this message again.
3311	 */
3312	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
3313	ahc->msgout_index = 0;
3314	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3315}
3316
3317static void
3318ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3319{
3320	/*
3321	 * To facilitate adding multiple messages together,
3322	 * each routine should increment the index and len
3323	 * variables instead of setting them explicitly.
3324	 */
3325	ahc->msgout_index = 0;
3326	ahc->msgout_len = 0;
3327
3328	if ((ahc->targ_msg_req & devinfo->target_mask) != 0)
3329		ahc_build_transfer_msg(ahc, devinfo);
3330	else
3331		panic("ahc_intr: AWAITING target message with no message");
3332
3333	ahc->msgout_index = 0;
3334	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3335}
3336
3337static int
3338ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
3339{
3340	/*
3341	 * What we care about here is if we had an
3342	 * outstanding SDTR or WDTR message for this
3343	 * target.  If we did, this is a signal that
3344	 * the target is refusing negotiation.
3345	 */
3346	struct scb *scb;
3347	struct ahc_initiator_tinfo *tinfo;
3348	struct tmode_tstate *tstate;
3349	u_int scb_index;
3350	u_int last_msg;
3351	int   response = 0;
3352
3353	scb_index = ahc_inb(ahc, SCB_TAG);
3354	scb = &ahc->scb_data->scbarray[scb_index];
3355
3356	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
3357				    devinfo->our_scsiid,
3358				    devinfo->target, &tstate);
3359	/* Might be necessary */
3360	last_msg = ahc_inb(ahc, LAST_MSG);
3361
3362	if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) {
3363
3364		/* note 8bit xfers */
3365		printf("%s:%c:%d: refuses WIDE negotiation.  Using "
3366		       "8bit transfers\n", ahc_name(ahc),
3367		       devinfo->channel, devinfo->target);
3368		ahc_set_width(ahc, devinfo, scb->ccb->ccb_h.path,
3369			      MSG_EXT_WDTR_BUS_8_BIT,
3370			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3371			      /*paused*/TRUE);
3372		/*
3373		 * No need to clear the sync rate.  If the target
3374		 * did not accept the command, our syncrate is
3375		 * unaffected.  If the target started the negotiation,
3376		 * but rejected our response, we already cleared the
3377		 * sync rate before sending our WDTR.
3378		 */
3379		if (tinfo->goal.period) {
3380			u_int period;
3381			u_int ppr_options;
3382
3383			/* Start the sync negotiation */
3384			period = tinfo->goal.period;
3385			ppr_options = 0;
3386			ahc_devlimited_syncrate(ahc, &period, &ppr_options);
3387			ahc->msgout_index = 0;
3388			ahc->msgout_len = 0;
3389			ahc_construct_sdtr(ahc, period, tinfo->goal.offset);
3390			ahc->msgout_index = 0;
3391			response = 1;
3392		}
3393	} else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) {
3394		/* note asynch xfers and clear flag */
3395		ahc_set_syncrate(ahc, devinfo, scb->ccb->ccb_h.path,
3396				 /*syncrate*/NULL, /*period*/0,
3397				 /*offset*/0, /*ppr_options*/0,
3398				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3399				 /*paused*/TRUE);
3400		printf("%s:%c:%d: refuses synchronous negotiation. "
3401		       "Using asynchronous transfers\n",
3402		       ahc_name(ahc),
3403		       devinfo->channel, devinfo->target);
3404	} else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) {
3405		struct	ccb_trans_settings neg;
3406
3407		printf("%s:%c:%d: refuses tagged commands.  Performing "
3408		       "non-tagged I/O\n", ahc_name(ahc),
3409		       devinfo->channel, devinfo->target);
3410
3411		ahc_set_tags(ahc, devinfo, FALSE);
3412		neg.flags = CCB_TRANS_CURRENT_SETTINGS;
3413		neg.valid = CCB_TRANS_TQ_VALID;
3414		xpt_setup_ccb(&neg.ccb_h, scb->ccb->ccb_h.path, /*priority*/1);
3415		xpt_async(AC_TRANSFER_NEG, scb->ccb->ccb_h.path, &neg);
3416
3417		/*
3418		 * Resend the identify for this CCB as the target
3419		 * may believe that the selection is invalid otherwise.
3420		 */
3421		ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL)
3422					  & ~MSG_SIMPLE_Q_TAG);
3423	 	scb->hscb->control &= ~MSG_SIMPLE_Q_TAG;
3424		scb->ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3425		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
3426		ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3427
3428		/*
3429		 * Requeue all tagged commands for this target
3430		 * currently in our possession so they can be
3431		 * converted to untagged commands.
3432		 */
3433		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
3434				   SCB_GET_CHANNEL(ahc, scb),
3435				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
3436				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
3437				   SEARCH_COMPLETE);
3438	} else {
3439		/*
3440		 * Otherwise, we ignore it.
3441		 */
3442		printf("%s:%c:%d: Message reject for %x -- ignored\n",
3443		       ahc_name(ahc), devinfo->channel, devinfo->target,
3444		       last_msg);
3445	}
3446	return (response);
3447}
3448
3449static void
3450ahc_clear_msg_state(struct ahc_softc *ahc)
3451{
3452	ahc->msgout_len = 0;
3453	ahc->msgin_index = 0;
3454	ahc->msg_type = MSG_TYPE_NONE;
3455	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
3456}
3457
3458static void
3459ahc_handle_message_phase(struct ahc_softc *ahc, struct cam_path *path)
3460{
3461	struct	ahc_devinfo devinfo;
3462	u_int	bus_phase;
3463	int	end_session;
3464
3465	ahc_fetch_devinfo(ahc, &devinfo);
3466	end_session = FALSE;
3467	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
3468
3469reswitch:
3470	switch (ahc->msg_type) {
3471	case MSG_TYPE_INITIATOR_MSGOUT:
3472	{
3473		int lastbyte;
3474		int phasemis;
3475		int msgdone;
3476
3477		if (ahc->msgout_len == 0)
3478			panic("REQINIT interrupt with no active message");
3479
3480		phasemis = bus_phase != P_MESGOUT;
3481		if (phasemis) {
3482			if (bus_phase == P_MESGIN) {
3483				/*
3484				 * Change gears and see if
3485				 * this message is of interest to
3486				 * us or should be passed back to
3487				 * the sequencer.
3488				 */
3489				ahc_outb(ahc, CLRSINT1, CLRATNO);
3490				ahc->send_msg_perror = FALSE;
3491				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
3492				ahc->msgin_index = 0;
3493				goto reswitch;
3494			}
3495			end_session = TRUE;
3496			break;
3497		}
3498
3499		if (ahc->send_msg_perror) {
3500			ahc_outb(ahc, CLRSINT1, CLRATNO);
3501			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3502			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
3503			break;
3504		}
3505
3506		msgdone	= ahc->msgout_index == ahc->msgout_len;
3507		if (msgdone) {
3508			/*
3509			 * The target has requested a retry.
3510			 * Re-assert ATN, reset our message index to
3511			 * 0, and try again.
3512			 */
3513			ahc->msgout_index = 0;
3514			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
3515		}
3516
3517		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
3518		if (lastbyte) {
3519			/* Last byte is signified by dropping ATN */
3520			ahc_outb(ahc, CLRSINT1, CLRATNO);
3521		}
3522
3523		/*
3524		 * Clear our interrupt status and present
3525		 * the next byte on the bus.
3526		 */
3527		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3528		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
3529		break;
3530	}
3531	case MSG_TYPE_INITIATOR_MSGIN:
3532	{
3533		int phasemis;
3534		int message_done;
3535
3536		phasemis = bus_phase != P_MESGIN;
3537
3538		if (phasemis) {
3539			ahc->msgin_index = 0;
3540			if (bus_phase == P_MESGOUT
3541			 && (ahc->send_msg_perror == TRUE
3542			  || (ahc->msgout_len != 0
3543			   && ahc->msgout_index == 0))) {
3544				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
3545				goto reswitch;
3546			}
3547			end_session = TRUE;
3548			break;
3549		}
3550
3551		/* Pull the byte in without acking it */
3552		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
3553
3554		message_done = ahc_parse_msg(ahc, path, &devinfo);
3555
3556		if (message_done) {
3557			/*
3558			 * Clear our incoming message buffer in case there
3559			 * is another message following this one.
3560			 */
3561			ahc->msgin_index = 0;
3562
3563			/*
3564			 * If this message elicited a response,
3565			 * assert ATN so the target takes us to the
3566			 * message out phase.
3567			 */
3568			if (ahc->msgout_len != 0)
3569				ahc_outb(ahc, SCSISIGO,
3570					 ahc_inb(ahc, SCSISIGO) | ATNO);
3571		} else
3572			ahc->msgin_index++;
3573
3574		/* Ack the byte */
3575		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
3576		ahc_inb(ahc, SCSIDATL);
3577		break;
3578	}
3579	case MSG_TYPE_TARGET_MSGIN:
3580	{
3581		int msgdone;
3582		int msgout_request;
3583
3584		if (ahc->msgout_len == 0)
3585			panic("Target MSGIN with no active message");
3586
3587		/*
3588		 * If we interrupted a mesgout session, the initiator
3589		 * will not know this until our first REQ.  So, we
3590		 * only honor mesgout requests after we've sent our
3591		 * first byte.
3592		 */
3593		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
3594		 && ahc->msgout_index > 0)
3595			msgout_request = TRUE;
3596		else
3597			msgout_request = FALSE;
3598
3599		if (msgout_request) {
3600
3601			/*
3602			 * Change gears and see if
3603			 * this message is of interest to
3604			 * us or should be passed back to
3605			 * the sequencer.
3606			 */
3607			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
3608			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
3609			ahc->msgin_index = 0;
3610			/* Dummy read to REQ for first byte */
3611			ahc_inb(ahc, SCSIDATL);
3612			ahc_outb(ahc, SXFRCTL0,
3613				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3614			break;
3615		}
3616
3617		msgdone = ahc->msgout_index == ahc->msgout_len;
3618		if (msgdone) {
3619			ahc_outb(ahc, SXFRCTL0,
3620				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
3621			end_session = TRUE;
3622			break;
3623		}
3624
3625		/*
3626		 * Present the next byte on the bus.
3627		 */
3628		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3629		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
3630		break;
3631	}
3632	case MSG_TYPE_TARGET_MSGOUT:
3633	{
3634		int lastbyte;
3635		int msgdone;
3636
3637		/*
3638		 * The initiator signals that this is
3639		 * the last byte by dropping ATN.
3640		 */
3641		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
3642
3643		/*
3644		 * Read the latched byte, but turn off SPIOEN first
3645		 * so that we don't inadvertently cause a REQ for the
3646		 * next byte.
3647		 */
3648		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
3649		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
3650		msgdone = ahc_parse_msg(ahc, path, &devinfo);
3651		if (msgdone == MSGLOOP_TERMINATED) {
3652			/*
3653			 * The message is *really* done in that it caused
3654			 * us to go to bus free.  The sequencer has already
3655			 * been reset at this point, so pull the ejection
3656			 * handle.
3657			 */
3658			return;
3659		}
3660
3661		ahc->msgin_index++;
3662
3663		/*
3664		 * XXX Read spec about initiator dropping ATN too soon
3665		 *     and use msgdone to detect it.
3666		 */
3667		if (msgdone == MSGLOOP_MSGCOMPLETE) {
3668			ahc->msgin_index = 0;
3669
3670			/*
3671			 * If this message elicited a response, transition
3672			 * to the Message in phase and send it.
3673			 */
3674			if (ahc->msgout_len != 0) {
3675				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
3676				ahc_outb(ahc, SXFRCTL0,
3677					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3678				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
3679				ahc->msgin_index = 0;
3680				break;
3681			}
3682		}
3683
3684		if (lastbyte)
3685			end_session = TRUE;
3686		else {
3687			/* Ask for the next byte. */
3688			ahc_outb(ahc, SXFRCTL0,
3689				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
3690		}
3691
3692		break;
3693	}
3694	default:
3695		panic("Unknown REQINIT message type");
3696	}
3697
3698	if (end_session) {
3699		ahc_clear_msg_state(ahc);
3700		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
3701	} else
3702		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
3703}
3704
3705/*
3706 * See if we sent a particular extended message to the target.
3707 * If "full" is true, the target saw the full message.
3708 * If "full" is false, the target saw at least the first
3709 * byte of the message.
3710 */
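/*
 * Note: an extended message occupies msgout_buf as
 * { MSG_EXTENDED, length, message type, arguments... }, so the type byte
 * lives at index+2 and the last byte of the message at index + 1 + length.
 */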
3711static int
3712ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full)
3713{
3714	int found;
3715	u_int index;
3716
3717	found = FALSE;
3718	index = 0;
3719
3720	while (index < ahc->msgout_len) {
3721		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
3722
3723			/* Found a candidate */
3724			if (ahc->msgout_buf[index+2] == msgtype) {
3725				u_int end_index;
3726
3727				end_index = index + 1
3728					  + ahc->msgout_buf[index + 1];
3729				if (full) {
3730					if (ahc->msgout_index > end_index)
3731						found = TRUE;
3732				} else if (ahc->msgout_index > index)
3733					found = TRUE;
3734			}
3735			break;
3736		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
3737			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
3738
3739			/* Skip tag type and tag id or residue param */
3740			index += 2;
3741		} else {
3742			/* Single byte message */
3743			index++;
3744		}
3745	}
3746	return (found);
3747}
3748
3749static int
3750ahc_parse_msg(struct ahc_softc *ahc, struct cam_path *path,
3751	      struct ahc_devinfo *devinfo)
3752{
3753	struct	ahc_initiator_tinfo *tinfo;
3754	struct	tmode_tstate *tstate;
3755	int	reject;
3756	int	done;
3757	int	response;
3758	u_int	targ_scsirate;
3759
3760	done = MSGLOOP_IN_PROG;
3761	response = FALSE;
3762	reject = FALSE;
3763	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
3764				    devinfo->target, &tstate);
3765	targ_scsirate = tinfo->scsirate;
3766
3767	/*
3768	 * Parse as much of the message as is available,
3769	 * rejecting it if we don't support it.  When
3770	 * the entire message is available and has been
3771	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
3772	 * that we have parsed an entire message.
3773	 *
3774	 * In the case of extended messages, we accept the length
3775	 * byte outright and perform more checking once we know the
3776	 * extended message type.
3777	 */
3778	switch (ahc->msgin_buf[0]) {
3779	case MSG_MESSAGE_REJECT:
3780		response = ahc_handle_msg_reject(ahc, devinfo);
3781		/* FALLTHROUGH */
3782	case MSG_NOOP:
3783		done = MSGLOOP_MSGCOMPLETE;
3784		break;
3785	case MSG_EXTENDED:
3786	{
3787		/* Wait for enough of the message to begin validation */
3788		if (ahc->msgin_index < 2)
3789			break;
3790		switch (ahc->msgin_buf[2]) {
3791		case MSG_EXT_SDTR:
3792		{
3793			struct	 ahc_syncrate *syncrate;
3794			u_int	 period;
3795			u_int	 ppr_options;
3796			u_int	 offset;
3797			u_int	 saved_offset;
3798
3799			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
3800				reject = TRUE;
3801				break;
3802			}
3803
3804			/*
3805			 * Wait until we have both args before validating
3806			 * and acting on this message.
3807			 *
3808			 * Add one to MSG_EXT_SDTR_LEN to account for
3809			 * the extended message preamble.
3810			 */
3811			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
3812				break;
3813
3814			period = ahc->msgin_buf[3];
3815			ppr_options = 0;
3816			saved_offset = offset = ahc->msgin_buf[4];
3817			syncrate = ahc_devlimited_syncrate(ahc, &period,
3818							   &ppr_options);
3819			ahc_validate_offset(ahc, syncrate, &offset,
3820					    targ_scsirate & WIDEXFER);
3821			ahc_set_syncrate(ahc, devinfo, path,
3822					 syncrate, period,
3823					 offset, ppr_options,
3824					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3825					 /*paused*/TRUE);
3826
3827			/*
3828			 * See if we initiated Sync Negotiation
3829			 * and didn't have to fall down to async
3830			 * transfers.
3831			 */
3832			if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) {
3833				/* We started it */
3834				if (saved_offset != offset) {
3835					/* Went too low - force async */
3836					reject = TRUE;
3837				}
3838			} else {
3839				/*
3840				 * Send our own SDTR in reply
3841				 */
3842				if (bootverbose)
3843					printf("Sending SDTR!\n");
3844				ahc->msgout_index = 0;
3845				ahc->msgout_len = 0;
3846				ahc_construct_sdtr(ahc, period, offset);
3847				ahc->msgout_index = 0;
3848				response = TRUE;
3849			}
3850			done = MSGLOOP_MSGCOMPLETE;
3851			break;
3852		}
3853		case MSG_EXT_WDTR:
3854		{
3855			u_int bus_width;
3856			u_int saved_width;
3857			u_int sending_reply;
3858
3859			sending_reply = FALSE;
3860			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
3861				reject = TRUE;
3862				break;
3863			}
3864
3865			/*
3866			 * Wait until we have our arg before validating
3867			 * and acting on this message.
3868			 *
3869			 * Add one to MSG_EXT_WDTR_LEN to account for
3870			 * the extended message preamble.
3871			 */
3872			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
3873				break;
3874
3875			bus_width = ahc->msgin_buf[3];
3876			saved_width = bus_width;
3877			ahc_validate_width(ahc, &bus_width);
3878
3879			if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) {
3880				/*
3881				 * Don't send a WDTR back to the
3882				 * target, since we asked first.
3883				 * If the width went higher than our
3884				 * request, reject it.
3885				 */
3886				if (saved_width > bus_width) {
3887					reject = TRUE;
3888					printf("%s: target %d requested %dBit "
3889					       "transfers.  Rejecting...\n",
3890					       ahc_name(ahc), devinfo->target,
3891					       8 * (0x01 << bus_width));
3892					bus_width = 0;
3893				}
3894			} else {
3895				/*
3896				 * Send our own WDTR in reply
3897				 */
3898				if (bootverbose)
3899					printf("Sending WDTR!\n");
3900				ahc->msgout_index = 0;
3901				ahc->msgout_len = 0;
3902				ahc_construct_wdtr(ahc, bus_width);
3903				ahc->msgout_index = 0;
3904				response = TRUE;
3905				sending_reply = TRUE;
3906			}
3907			ahc_set_width(ahc, devinfo, path, bus_width,
3908				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
3909				      /*paused*/TRUE);
3910
3911			/* After a wide message, we are async */
3912			ahc_set_syncrate(ahc, devinfo, path,
3913					 /*syncrate*/NULL, /*period*/0,
3914					 /*offset*/0, /*ppr_options*/0,
3915					 AHC_TRANS_ACTIVE, /*paused*/TRUE);
3916			if (sending_reply == FALSE && reject == FALSE) {
3917
3918				/* XXX functionalize */
3919				if (tinfo->goal.period) {
3920					struct	ahc_syncrate *rate;
3921					u_int	period;
3922					u_int	ppr;
3923					u_int	offset;
3924
3925					/* Start the sync negotiation */
3926					period = tinfo->goal.period;
3927					ppr = 0;
3928					rate = ahc_devlimited_syncrate(ahc,
3929								       &period,
3930								       &ppr);
3931					offset = tinfo->goal.offset;
3932					ahc_validate_offset(ahc, rate, &offset,
3933							  tinfo->current.width);
3934					ahc->msgout_index = 0;
3935					ahc->msgout_len = 0;
3936					ahc_construct_sdtr(ahc, period, offset);
3937					ahc->msgout_index = 0;
3938					response = TRUE;
3939				}
3940			}
3941			done = MSGLOOP_MSGCOMPLETE;
3942			break;
3943		}
3944		case MSG_EXT_PPR:
3945		{
3946			struct	ahc_syncrate *syncrate;
3947			u_int	period;
3948			u_int	offset;
3949			u_int	bus_width;
3950			u_int	ppr_options;
3951			u_int	saved_width;
3952			u_int	saved_offset;
3953			u_int	saved_ppr_options;
3954
3955			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
3956				reject = TRUE;
3957				break;
3958			}
3959
3960			/*
3961			 * Wait until we have all args before validating
3962			 * and acting on this message.
3963			 *
3964			 * Add one to MSG_EXT_PPR_LEN to account for
3965			 * the extended message preamble.
3966			 */
3967			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
3968				break;
3969
3970			period = ahc->msgin_buf[3];
3971			offset = ahc->msgin_buf[5];
3972			bus_width = ahc->msgin_buf[6];
3973			saved_width = bus_width;
3974			ppr_options = ahc->msgin_buf[7];
3975			/*
3976			 * According to the spec, a DT only
3977			 * period factor with no DT option
3978			 * set implies async.
3979			 */
3980			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
3981			 && period == 9)
3982				offset = 0;
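			/*
			 * (A period factor of 9 corresponds to 12.5ns, which
			 * is only attainable with DT transfers, so without
			 * the DT option the offer degenerates to async.)
			 */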
3983			saved_ppr_options = ppr_options;
3984			saved_offset = offset;
3985
3986			/*
3987			 * Mask out any options we don't support
3988			 * on any controller.  Transfer options are
3989			 * only available if we are negotiating wide.
3990			 */
3991			ppr_options &= MSG_EXT_PPR_DT_REQ;
3992			if (bus_width == 0)
3993				ppr_options = 0;
3994
3995			ahc_validate_width(ahc, &bus_width);
3996			syncrate = ahc_devlimited_syncrate(ahc, &period,
3997							   &ppr_options);
3998			ahc_validate_offset(ahc, syncrate, &offset, bus_width);
3999
4000			if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/TRUE)) {
4001				/*
4002				 * If we are unable to do any of the
4003				 * requested options (we went too low),
4004				 * then we'll have to reject the message.
4005				 */
4006				if (saved_width > bus_width
4007				 || saved_offset != offset
4008				 || saved_ppr_options != ppr_options)
4009					reject = TRUE;
4010			} else {
4011				printf("Target Initiated PPR detected!\n");
4012				response = TRUE;
4013			}
4014			ahc_set_syncrate(ahc, devinfo, path,
4015					 syncrate, period,
4016					 offset, ppr_options,
4017					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
4018					 /*paused*/TRUE);
4019			ahc_set_width(ahc, devinfo, path, bus_width,
4020				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
4021				      /*paused*/TRUE);
4022			break;
4023		}
4024		default:
4025			/* Unknown extended message.  Reject it. */
4026			reject = TRUE;
4027			break;
4028		}
4029		break;
4030	}
4031	case MSG_BUS_DEV_RESET:
4032		ahc_handle_devreset(ahc, devinfo,
4033				    CAM_BDR_SENT, AC_SENT_BDR,
4034				    "Bus Device Reset Received",
4035				    /*verbose_level*/0);
4036		restart_sequencer(ahc);
4037		done = MSGLOOP_TERMINATED;
4038		break;
4039	case MSG_ABORT_TAG:
4040	case MSG_ABORT:
4041	case MSG_CLEAR_QUEUE:
4042		/* Target mode messages */
4043		if (devinfo->role != ROLE_TARGET) {
4044			reject = TRUE;
4045			break;
4046		}
4047		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
4048			       devinfo->lun,
4049			       ahc->msgin_buf[0] == MSG_ABORT_TAG
4050						  ? SCB_LIST_NULL
4051						  : ahc_inb(ahc, INITIATOR_TAG),
4052			       ROLE_TARGET, CAM_REQ_ABORTED);
4053
4054		tstate = ahc->enabled_targets[devinfo->our_scsiid];
4055		if (tstate != NULL) {
4056			struct tmode_lstate* lstate;
4057
4058			lstate = tstate->enabled_luns[devinfo->lun];
4059			if (lstate != NULL) {
4060				ahc_queue_lstate_event(ahc, lstate,
4061						       devinfo->our_scsiid,
4062						       ahc->msgin_buf[0],
4063						       /*arg*/0);
4064				ahc_send_lstate_events(ahc, lstate);
4065			}
4066		}
4067		done = MSGLOOP_MSGCOMPLETE;
4068		break;
4069	case MSG_TERM_IO_PROC:
4070	default:
4071		reject = TRUE;
4072		break;
4073	}
4074
4075	if (reject) {
4076		/*
4077		 * Setup to reject the message.
4078		 */
4079		ahc->msgout_index = 0;
4080		ahc->msgout_len = 1;
4081		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
4082		done = MSGLOOP_MSGCOMPLETE;
4083		response = TRUE;
4084	}
4085
4086	if (done != MSGLOOP_IN_PROG && !response)
4087		/* Clear the outgoing message buffer */
4088		ahc->msgout_len = 0;
4089
4090	return (done);
4091}
4092
4093static void
4094ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
4095{
4096	u_int scb_index;
4097	struct scb *scb;
4098
4099	scb_index = ahc_inb(ahc, SCB_TAG);
4100	scb = &ahc->scb_data->scbarray[scb_index];
4101	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
4102	 || (scb->ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN) {
4103		/*
4104		 * Ignore the message if we haven't
4105		 * seen an appropriate data phase yet.
4106		 */
4107	} else {
4108		/*
4109		 * If the residual occurred on the last
4110		 * transfer and the transfer request was
4111		 * expected to end on an odd count, do
4112		 * nothing.  Otherwise, subtract a byte
4113		 * and update the residual count accordingly.
4114		 */
4115		uint32_t sgptr;
4116
4117		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
4118		if ((sgptr & SG_LIST_NULL) != 0
4119		 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) {
4120			/*
4121			 * If the residual occurred on the last
4122			 * transfer and the transfer request was
4123			 * expected to end on an odd count, do
4124			 * nothing.
4125			 */
4126		} else {
4127			struct ahc_dma_seg *sg;
4128			uint32_t data_cnt;
4129			uint32_t data_addr;
4130
4131			/* Pull in the rest of the sgptr */
4132			sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
4133			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
4134			      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8);
4135			sgptr &= SG_PTR_MASK;
4136			data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16)
4137				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8)
4138				 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT));
4139
4140			data_addr = (ahc_inb(ahc, SHADDR + 3) << 24)
4141				  | (ahc_inb(ahc, SHADDR + 2) << 16)
4142				  | (ahc_inb(ahc, SHADDR + 1) << 8)
4143				  | (ahc_inb(ahc, SHADDR));
4144
4145			data_cnt += 1;
4146			data_addr -= 1;
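			/*
			 * IGNORE WIDE RESIDUE means the final byte of the
			 * last 16-bit transfer was a pad, so the true
			 * residual is one byte larger and the next data
			 * address one byte lower than the hardware recorded.
			 */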
4147
4148			sg = ahc_sg_bus_to_virt(scb, sgptr);
4149			/*
4150			 * The residual sg ptr points to the next S/G
4151			 * to load so we must go back one.
4152			 */
4153			sg--;
4154			if (sg != scb->sg_list
4155			 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) {
4156
4157				sg--;
4158				data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG);
4159				data_addr = sg->addr
4160					  + (sg->len & AHC_SG_LEN_MASK) - 1;
4161
4162				/*
4163				 * Increment sg so it points to the
4164				 * "next" sg.
4165				 */
4166				sg++;
4167				sgptr = ahc_sg_virt_to_bus(scb, sg);
4168				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3,
4169					 sgptr >> 24);
4170				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2,
4171					 sgptr >> 16);
4172				ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1,
4173					 sgptr >> 8);
4174				ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr);
4175			}
4176
4177/* XXX What about high address byte??? */
4178			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24);
4179			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16);
4180			ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8);
4181			ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
4182
4183/* XXX Perhaps better to just keep the saved address in sram */
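/*
 * (On Ultra2 parts the corrected address is presumably staged through HADDR
 * with PRELOADEN because the shadow address registers cannot be written
 * directly; older chips let us rewrite SHADDR in place.)
 */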
4184			if ((ahc->features & AHC_ULTRA2) != 0) {
4185				ahc_outb(ahc, HADDR + 3, data_addr >> 24);
4186				ahc_outb(ahc, HADDR + 2, data_addr >> 16);
4187				ahc_outb(ahc, HADDR + 1, data_addr >> 8);
4188				ahc_outb(ahc, HADDR, data_addr);
4189				ahc_outb(ahc, DFCNTRL, PRELOADEN);
4190				ahc_outb(ahc, SXFRCTL0,
4191					 ahc_inb(ahc, SXFRCTL0) | CLRCHN);
4192			} else {
4193				ahc_outb(ahc, SHADDR + 3, data_addr >> 24);
4194				ahc_outb(ahc, SHADDR + 2, data_addr >> 16);
4195				ahc_outb(ahc, SHADDR + 1, data_addr >> 8);
4196				ahc_outb(ahc, SHADDR, data_addr);
4197			}
4198		}
4199	}
4200}
4201
4202static void
4203ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
4204		    cam_status status, ac_code acode, char *message,
4205		    int verbose_level)
4206{
4207	struct cam_path *path;
4208	int found;
4209	int error;
4210	struct tmode_tstate* tstate;
4211	u_int lun;
4212
4213	error = ahc_create_path(ahc, devinfo, &path);
4214
4215	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
4216			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
4217			       status);
4218
4219	/*
4220	 * Send an immediate notify ccb to all target mode peripheral
4221	 * drivers affected by this action.
4222	 */
4223	tstate = ahc->enabled_targets[devinfo->our_scsiid];
4224	if (tstate != NULL) {
4225		for (lun = 0; lun <= 7; lun++) {
4226			struct tmode_lstate* lstate;
4227
4228			lstate = tstate->enabled_luns[lun];
4229			if (lstate == NULL)
4230				continue;
4231
4232			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
4233					       MSG_BUS_DEV_RESET, /*arg*/0);
4234			ahc_send_lstate_events(ahc, lstate);
4235		}
4236	}
4237
4238	/*
4239	 * Go back to async/narrow transfers and renegotiate.
4240	 * ahc_set_width and ahc_set_syncrate can cope with NULL
4241	 * paths.
4242	 */
4243	ahc_set_width(ahc, devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
4244		      AHC_TRANS_CUR, /*paused*/TRUE);
4245	ahc_set_syncrate(ahc, devinfo, path, /*syncrate*/NULL,
4246			 /*period*/0, /*offset*/0, /*ppr_options*/0,
4247			 AHC_TRANS_CUR, /*paused*/TRUE);
4248
4249	if (error == CAM_REQ_CMP && acode != 0)
4250		xpt_async(AC_SENT_BDR, path, NULL);
4251
4252	if (error == CAM_REQ_CMP)
4253		xpt_free_path(path);
4254
4255	if (message != NULL
4256	 && (verbose_level <= bootverbose))
4257		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
4258		       message, devinfo->channel, devinfo->target, found);
4259}
4260
4261/*
4262 * We have an scb which has been processed by the
4263 * adapter; now we look to see how the operation
4264 * went.
4265 */
4266static void
4267ahc_done(struct ahc_softc *ahc, struct scb *scb)
4268{
4269	union ccb *ccb;
4270
4271	CAM_DEBUG(scb->ccb->ccb_h.path, CAM_DEBUG_TRACE,
4272		  ("ahc_done - scb %d\n", scb->hscb->tag));
4273
4274	ccb = scb->ccb;
4275	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
4276	if (ccb->ccb_h.func_code == XPT_SCSI_IO
4277	  && ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
4278	   || ccb->csio.tag_action == CAM_TAG_ACTION_NONE)
4279	  && (ahc->features & AHC_SCB_BTT) == 0) {
4280		struct scb_tailq *untagged_q;
4281
4282		untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id];
4283		TAILQ_REMOVE(untagged_q, scb, links.tqe);
4284		ahc_run_untagged_queue(ahc, untagged_q);
4285	}
4286
4287	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
4288
4289	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
4290		bus_dmasync_op_t op;
4291
4292		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
4293			op = BUS_DMASYNC_POSTREAD;
4294		else
4295			op = BUS_DMASYNC_POSTWRITE;
4296		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
4297		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
4298	}
4299
4300	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
4301		if (ahc_ccb_status(ccb) == CAM_REQ_INPROG)
4302			ccb->ccb_h.status |= CAM_REQ_CMP;
4303		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4304		ahcfreescb(ahc, scb);
4305		xpt_done(ccb);
4306		return;
4307	}
4308
4309	/*
4310	 * If the recovery SCB completes, we have to be
4311	 * out of our timeout.
4312	 */
4313	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4314
4315		struct	ccb_hdr *ccbh;
4316
4317		/*
4318		 * We were able to complete the command successfully,
4319		 * so reinstate the timeouts for all other pending
4320		 * commands.
4321		 */
4322		ccbh = ahc->pending_ccbs.lh_first;
4323		while (ccbh != NULL) {
4324			struct scb *pending_scb;
4325
4326			pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
4327			ccbh->timeout_ch =
4328			    timeout(ahc_timeout, pending_scb,
4329				    (ccbh->timeout * hz)/1000);
4330			ccbh = LIST_NEXT(ccbh, sim_links.le);
4331		}
4332
4333		/*
4334		 * Ensure that we didn't put a second instance of this
4335		 * SCB into the QINFIFO.
4336		 */
4337		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
4338				   SCB_GET_CHANNEL(ahc, scb),
4339				   SCB_GET_LUN(scb), scb->hscb->tag,
4340				   ROLE_INITIATOR, /*status*/0,
4341				   SEARCH_REMOVE);
4342		if (ahc_ccb_status(ccb) == CAM_BDR_SENT
4343		 || ahc_ccb_status(ccb) == CAM_REQ_ABORTED)
4344			ahcsetccbstatus(ccb, CAM_CMD_TIMEOUT);
4345		xpt_print_path(ccb->ccb_h.path);
4346		printf("no longer in timeout, status = %x\n",
4347		       ccb->ccb_h.status);
4348	}
4349
4350	/* Don't clobber any existing error state */
4351	if (ahc_ccb_status(ccb) == CAM_REQ_INPROG) {
4352		ccb->ccb_h.status |= CAM_REQ_CMP;
4353	} else if ((scb->flags & SCB_SENSE) != 0) {
4354		/*
4355		 * We performed autosense retrieval.
4356		 *
4357		 * bzero the sense data before having
4358		 * the drive fill it.  The SCSI spec mandates
4359		 * that any untransferred data should be
4360		 * assumed to be zero.  Complete the 'bounce'
4361		 * of sense information through buffers accessible
4362		 * via bus-space by copying it into the client's
4363		 * csio.
4364		 */
4365		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
4366		bcopy(&ahc->scb_data->sense[scb->hscb->tag],
4367		      &ccb->csio.sense_data,
4368		      scb->sg_list->len & AHC_SG_LEN_MASK);
4369		scb->ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
4370	}
4371	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4372	ahcfreescb(ahc, scb);
4373	xpt_done(ccb);
4374}
4375
4376/*
4377 * Determine the number of SCBs available on the controller
4378 */
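/*
 * Each candidate SCB's control byte is written with its own index and read
 * back; the extra check of SCB 0 guards against cards whose SCB RAM wraps,
 * where a write to a nonexistent SCB would alias onto SCB 0.
 */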
4379int
4380ahc_probe_scbs(struct ahc_softc *ahc) {
4381	int i;
4382
4383	for (i = 0; i < AHC_SCB_MAX; i++) {
4384		ahc_outb(ahc, SCBPTR, i);
4385		ahc_outb(ahc, SCB_CONTROL, i);
4386		if (ahc_inb(ahc, SCB_CONTROL) != i)
4387			break;
4388		ahc_outb(ahc, SCBPTR, 0);
4389		if (ahc_inb(ahc, SCB_CONTROL) != 0)
4390			break;
4391	}
4392	return (i);
4393}
4394
4395/*
4396 * Start the board, ready for normal operation
4397 */
4398int
4399ahc_init(struct ahc_softc *ahc)
4400{
4401	int	  max_targ = 15;
4402	int	  i;
4403	int	  term;
4404	u_int	  scsi_conf;
4405	u_int	  scsiseq_template;
4406	u_int	  ultraenb;
4407	u_int	  discenable;
4408	u_int	  tagenable;
4409	size_t	  driver_data_size;
4410	uint32_t physaddr;
4411
4412#ifdef AHC_PRINT_SRAM
4413	printf("Scratch Ram:");
4414	for (i = 0x20; i < 0x5f; i++) {
4415		if (((i % 8) == 0) && (i != 0)) {
4416			printf ("\n              ");
4417		}
4418		printf (" 0x%x", ahc_inb(ahc, i));
4419	}
4420	if ((ahc->features & AHC_MORE_SRAM) != 0) {
4421		for (i = 0x70; i < 0x7f; i++) {
4422			if (((i % 8) == 0) && (i != 0)) {
4423				printf ("\n              ");
4424			}
4425			printf (" 0x%x", ahc_inb(ahc, i));
4426		}
4427	}
4428	printf ("\n");
4429#endif
4430
4431	/*
4432	 * Assume we have a board at this stage and it has been reset.
4433	 */
4434	if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4435		ahc->our_id = ahc->our_id_b = 7;
4436
4437	/*
4438	 * Default to allowing initiator operations.
4439	 */
4440	ahc->flags |= AHC_INITIATORMODE;
4441
4442	/*
4443	 * XXX Would be better to use a per device flag, but PCI and EISA
4444	 *     devices don't have them yet.
4445	 */
4446	if ((AHC_TMODE_ENABLE & (0x01 << ahc->unit)) != 0) {
4447		ahc->flags |= AHC_TARGETMODE;
4448		/*
4449		 * Although we have space for both the initiator and
4450		 * target roles on ULTRA2 chips, we currently disable
4451		 * the initiator role to allow multi-scsi-id target mode
4452		 * configurations.  We can only respond on the same SCSI
4453		 * ID as our initiator role if we allow initiator operation.
4454		 * At some point, we should add a configuration knob to
4455		 * allow both roles to be loaded.
4456		 */
4457		ahc->flags &= ~AHC_INITIATORMODE;
4458	}
4459
4460	/* DMA tag for mapping buffers into device visible space. */
4461	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
4462			       /*lowaddr*/BUS_SPACE_MAXADDR,
4463			       /*highaddr*/BUS_SPACE_MAXADDR,
4464			       /*filter*/NULL, /*filterarg*/NULL,
4465			       /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG,
4466			       /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4467			       /*flags*/BUS_DMA_ALLOCNOW,
4468			       &ahc->buffer_dmat) != 0) {
4469		return (ENOMEM);
4470	}
4471
4472	ahc->init_level++;
4473
4474	/*
4475	 * DMA tag for our command fifos and other data in system memory
4476	 * the card's sequencer must be able to access.  For initiator
4477	 * roles, we need to allocate space for the qinfifo and qoutfifo.
4478	 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4479	 * When providing for the target mode role, we additionally must
4480	 * provide space for the incoming target command fifo and an extra
4481	 * byte to deal with a dma bug in some chip versions.
4482	 */
4483	driver_data_size = 2 * 256 * sizeof(uint8_t);
4484	if ((ahc->flags & AHC_TARGETMODE) != 0)
4485		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4486				 + /*DMA WideOdd Bug Buffer*/1;
4487	if (bus_dma_tag_create(ahc->parent_dmat, /*alignment*/1, /*boundary*/0,
4488			       /*lowaddr*/BUS_SPACE_MAXADDR,
4489			       /*highaddr*/BUS_SPACE_MAXADDR,
4490			       /*filter*/NULL, /*filterarg*/NULL,
4491			       driver_data_size,
4492			       /*nsegments*/1,
4493			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4494			       /*flags*/0, &ahc->shared_data_dmat) != 0) {
4495		return (ENOMEM);
4496	}
4497
4498	ahc->init_level++;
4499
4500	/* Allocation of driver data */
4501	if (bus_dmamem_alloc(ahc->shared_data_dmat,
4502			     (void **)&ahc->qoutfifo,
4503			     BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4504		return (ENOMEM);
4505	}
4506
4507	ahc->init_level++;
4508
4509        /* And permanently map it in */
4510	bus_dmamap_load(ahc->shared_data_dmat, ahc->shared_data_dmamap,
4511			ahc->qoutfifo, driver_data_size, ahcdmamapcb,
4512			&ahc->shared_data_busaddr, /*flags*/0);
4513
4514	if ((ahc->flags & AHC_TARGETMODE) != 0) {
4515		ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4516		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4517		ahc->dma_bug_buf = ahc->shared_data_busaddr
4518				 + driver_data_size - 1;
4519		/* All target command blocks start out invalid. */
4520		for (i = 0; i < AHC_TMODE_CMDS; i++)
4521			ahc->targetcmds[i].cmd_valid = 0;
4522		ahc->tqinfifonext = 1;
4523		ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4524		ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4525		ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4526	}
4527	ahc->qinfifo = &ahc->qoutfifo[256];
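	/*
	 * Resulting layout, low to high: the target command array when
	 * target mode is enabled (presumably AHC_TMODE_CMDS == 256 entries,
	 * matching the &targetcmds[256] arithmetic above), the 256 byte
	 * qoutfifo, the 256 byte qinfifo, and, in target mode, the single
	 * trailing pad byte used as the WideOdd DMA bug buffer.
	 */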
4528
4529	ahc->init_level++;
4530
4531	/* Allocate SCB data now that buffer_dmat is initialized */
4532	if (ahc->scb_data->maxhscbs == 0)
4533		if (ahcinitscbdata(ahc) != 0)
4534			return (ENOMEM);
4535
4536	/*
4537	 * Allocate a tstate to house information for our
4538	 * initiator presence on the bus as well as the user
4539	 * data for any target mode initiator.
4540	 */
4541	if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4542		printf("%s: unable to allocate tmode_tstate.  "
4543		       "Failing attach\n", ahc_name(ahc));
4544		return (-1);
4545	}
4546
4547	if ((ahc->features & AHC_TWIN) != 0) {
4548		if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4549			printf("%s: unable to allocate tmode_tstate.  "
4550			       "Failing attach\n", ahc_name(ahc));
4551			return (-1);
4552		}
4553 		printf("Twin Channel, A SCSI Id=%d, B SCSI Id=%d, primary %c, ",
4554		       ahc->our_id, ahc->our_id_b,
4555		       ahc->flags & AHC_CHANNEL_B_PRIMARY? 'B': 'A');
4556	} else {
4557		if ((ahc->features & AHC_WIDE) != 0) {
4558			printf("Wide ");
4559		} else {
4560			printf("Single ");
4561		}
4562		printf("Channel %c, SCSI Id=%d, ", ahc->channel, ahc->our_id);
4563	}
4564
4565	ahc_outb(ahc, SEQ_FLAGS, 0);
4566
4567	if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) {
4568		ahc->flags |= AHC_PAGESCBS;
4569		printf("%d/%d SCBs\n", ahc->scb_data->maxhscbs, AHC_SCB_MAX);
4570	} else {
4571		ahc->flags &= ~AHC_PAGESCBS;
4572		printf("%d SCBs\n", ahc->scb_data->maxhscbs);
4573	}
4574
4575#ifdef AHC_DEBUG
4576	if (ahc_debug & AHC_SHOWMISC) {
4577		printf("%s: hardware scb %d bytes; kernel scb %d bytes; "
4578		       "ahc_dma %d bytes\n",
4579			ahc_name(ahc),
4580		        sizeof(struct hardware_scb),
4581			sizeof(struct scb),
4582			sizeof(struct ahc_dma_seg));
4583	}
4584#endif /* AHC_DEBUG */
4585
4586	/* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4587	if (ahc->features & AHC_TWIN) {
4588
4589		/*
4590		 * The device is gated to channel B after a chip reset,
4591		 * so set those values first
4592		 */
4593		term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4594		if ((ahc->features & AHC_ULTRA2) != 0)
4595			ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id_b);
4596		else
4597			ahc_outb(ahc, SCSIID, ahc->our_id_b);
4598		scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4599		ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4600					|term|ENSTIMER|ACTNEGEN);
4601		ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4602		ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4603
4604		if ((scsi_conf & RESET_SCSI) != 0
4605		 && (ahc->flags & AHC_INITIATORMODE) != 0)
4606			ahc->flags |= AHC_RESET_BUS_B;
4607
4608		/* Select Channel A */
4609		ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4610	}
4611	term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4612	if ((ahc->features & AHC_ULTRA2) != 0)
4613		ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4614	else
4615		ahc_outb(ahc, SCSIID, ahc->our_id);
4616	scsi_conf = ahc_inb(ahc, SCSICONF);
4617	ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4618				|term
4619				|ENSTIMER|ACTNEGEN);
4620	ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4621	ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4622
4623	if ((scsi_conf & RESET_SCSI) != 0
4624	 && (ahc->flags & AHC_INITIATORMODE) != 0)
4625		ahc->flags |= AHC_RESET_BUS_A;
4626
4627	/*
4628	 * Look at the information that board initialization or
4629	 * the board bios has left us.
4630	 */
4631	ultraenb = 0;
4632	tagenable = ALL_TARGETS_MASK;
4633
4634	/* Grab the disconnection disable table and invert it for our needs */
4635	if (ahc->flags & AHC_USEDEFAULTS) {
4636		printf("%s: Host Adapter Bios disabled.  Using default SCSI "
4637			"device parameters\n", ahc_name(ahc));
4638		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4639			      AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4640		discenable = ALL_TARGETS_MASK;
4641		if ((ahc->features & AHC_ULTRA) != 0)
4642			ultraenb = ALL_TARGETS_MASK;
4643	} else {
4644		discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4645			   | ahc_inb(ahc, DISC_DSB));
4646		if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4647			ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4648				      | ahc_inb(ahc, ULTRA_ENB);
4649	}
4650
4651	if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4652		max_targ = 7;
4653
4654	for (i = 0; i <= max_targ; i++) {
4655		struct ahc_initiator_tinfo *tinfo;
4656		struct tmode_tstate *tstate;
4657		u_int our_id;
4658		u_int target_id;
4659		char channel;
4660
4661		channel = 'A';
4662		our_id = ahc->our_id;
4663		target_id = i;
4664		if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4665			channel = 'B';
4666			our_id = ahc->our_id_b;
4667			target_id = i % 8;
4668		}
4669		tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4670					    target_id, &tstate);
4671		/* Default to async narrow across the board */
4672		bzero(tinfo, sizeof(*tinfo));
4673		if (ahc->flags & AHC_USEDEFAULTS) {
4674			if ((ahc->features & AHC_WIDE) != 0)
4675				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4676
4677			/*
4678			 * These will be truncated when we determine the
4679			 * connection type we have with the target.
4680			 */
4681			tinfo->user.period = ahc_syncrates->period;
4682			tinfo->user.offset = ~0;
4683		} else {
4684			u_int scsirate;
4685			uint16_t mask;
4686
4687			/* Take the settings leftover in scratch RAM. */
4688			scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4689			mask = (0x01 << i);
4690			if ((ahc->features & AHC_ULTRA2) != 0) {
4691				u_int offset;
4692				u_int maxsync;
4693
4694				if ((scsirate & SOFS) == 0x0F) {
4695					/*
4696					 * Haven't negotiated yet,
4697					 * so the format is different.
4698					 */
4699					scsirate = (scsirate & SXFR) >> 4
4700						 | ((ultraenb & mask)
4701						  ? 0x08 : 0x0)
4702						 | (scsirate & WIDEXFER);
4703					offset = MAX_OFFSET_ULTRA2;
4704				} else
4705					offset = ahc_inb(ahc, TARG_OFFSET + i);
4706				maxsync = AHC_SYNCRATE_ULTRA2;
4707				if ((ahc->features & AHC_DT) != 0)
4708					maxsync = AHC_SYNCRATE_DT;
4709				tinfo->user.period =
4710				    ahc_find_period(ahc, scsirate, maxsync);
4711				if (offset == 0)
4712					tinfo->user.period = 0;
4713				else
4714					tinfo->user.offset = ~0;
4715				if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4716				 && (ahc->features & AHC_DT) != 0)
4717					tinfo->user.ppr_options =
4718					    MSG_EXT_PPR_DT_REQ;
4719			} else if ((scsirate & SOFS) != 0) {
4720				tinfo->user.period =
4721				    ahc_find_period(ahc, scsirate,
4722						    (ultraenb & mask)
4723						   ? AHC_SYNCRATE_ULTRA
4724						   : AHC_SYNCRATE_FAST);
4725				if (tinfo->user.period != 0)
4726					tinfo->user.offset = ~0;
4727			}
4728			if ((scsirate & WIDEXFER) != 0
4729			 && (ahc->features & AHC_WIDE) != 0)
4730				tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4731			tinfo->user.protocol_version = 4;
4732			if ((ahc->features & AHC_DT) != 0)
4733				tinfo->user.transport_version = 3;
4734			else
4735				tinfo->user.transport_version = 2;
4736			tinfo->goal.protocol_version = 2;
4737			tinfo->goal.transport_version = 2;
4738			tinfo->current.protocol_version = 2;
4739			tinfo->current.transport_version = 2;
4740		}
4741		tstate->ultraenb = ultraenb;
4742		tstate->discenable = discenable;
4743		tstate->tagenable = 0; /* Wait until the XPT says it's okay */
4744	}
4745	ahc->user_discenable = discenable;
4746	ahc->user_tagenable = tagenable;
4747
4748	/* There are no untagged SCBs active yet. */
4749	for (i = 0; i < 16; i++) {
4750		ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0), /*unbusy*/TRUE);
4751		if ((ahc->features & AHC_SCB_BTT) != 0) {
4752			int lun;
4753
4754			/*
4755			 * The SCB based BTT allows an entry per
4756			 * target and lun pair.
4757			 */
4758			for (lun = 1; lun < AHC_NUM_LUNS; lun++) {
4759				ahc_index_busy_tcl(ahc,
4760						   BUILD_TCL(i << 4, lun),
4761						   /*unbusy*/TRUE);
4762			}
4763		}
4764	}
4765
4766	/* All of our queues are empty */
4767	for (i = 0; i < 256; i++)
4768		ahc->qoutfifo[i] = SCB_LIST_NULL;
4769
4770	for (i = 0; i < 256; i++)
4771		ahc->qinfifo[i] = SCB_LIST_NULL;
4772
4773	if ((ahc->features & AHC_MULTI_TID) != 0) {
4774		ahc_outb(ahc, TARGID, 0);
4775		ahc_outb(ahc, TARGID + 1, 0);
4776	}
4777
4778	/*
4779	 * Tell the sequencer where it can find our arrays in memory.
4780	 */
4781	physaddr = ahc->scb_data->hscb_busaddr;
4782	ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4783	ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4784	ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4785	ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4786
4787	physaddr = ahc->shared_data_busaddr;
4788	ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4789	ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4790	ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4791	ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4792
4793	/*
4794	 * Initialize the group code to command length table.
4795	 * This overrides the values in TARG_SCSIRATE, so only
4796	 * set up the table after we have processed that information.
4797	 */
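	/*
	 * One entry per SCSI CDB group code: group 0 uses 6 byte CDBs,
	 * groups 1 and 2 use 10 byte CDBs, group 4 uses 16 byte and
	 * group 5 uses 12 byte CDBs; reserved and vendor groups get 0.
	 * The values stored appear to be the CDB length minus one.
	 */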
4798	ahc_outb(ahc, CMDSIZE_TABLE, 5);
4799	ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4800	ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4801	ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4802	ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4803	ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4804	ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4805	ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4806
4807	/* Tell the sequencer of our initial queue positions */
4808	ahc_outb(ahc, KERNEL_QINPOS, 0);
4809	ahc_outb(ahc, QINPOS, 0);
4810	ahc_outb(ahc, QOUTPOS, 0);
4811
4812	/* Don't have any special messages to send to targets */
4813	ahc_outb(ahc, TARGET_MSG_REQUEST, 0);
4814	ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0);
4815
4816	/*
4817	 * Use the built in queue management registers
4818	 * if they are available.
4819	 */
4820	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4821		ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4822		ahc_outb(ahc, SDSCB_QOFF, 0);
4823		ahc_outb(ahc, SNSCB_QOFF, 0);
4824		ahc_outb(ahc, HNSCB_QOFF, 0);
4825	}
4826
4827
4828	/* We don't have any waiting selections */
4829	ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4830
4831	/* Our disconnection list is empty too */
4832	ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4833
4834	/* Message out buffer starts empty */
4835	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4836
4837	/*
4838	 * Setup the allowed SCSI Sequences based on operational mode.
4839	 * If we are a target, we'll enable selection-in operations once
4840	 * we've had a lun enabled.
4841	 */
4842	scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4843	if ((ahc->flags & AHC_INITIATORMODE) != 0)
4844		scsiseq_template |= ENRSELI;
4845	ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4846
4847	/*
4848	 * Load the Sequencer program and Enable the adapter
4849	 * in "fast" mode.
4850         */
4851	if (bootverbose)
4852		printf("%s: Downloading Sequencer Program...",
4853		       ahc_name(ahc));
4854
4855	ahc_loadseq(ahc);
4856
4857	/* We have to wait until after any system dumps... */
4858	EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
4859			      ahc, SHUTDOWN_PRI_DEFAULT);
4860
4861	if ((ahc->features & AHC_ULTRA2) != 0) {
4862		int wait;
4863
4864		/*
4865		 * Wait for up to 500ms for our transceivers
4866		 * to settle.  If the adapter does not have
4867		 * a cable attached, the transceivers may
4868		 * never settle, so don't complain if we
4869		 * fail here.
4870		 */
4871		pause_sequencer(ahc);
4872		for (wait = 5000;
4873		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4874		     wait--)
4875			DELAY(100);
4876		unpause_sequencer(ahc);
4877	}
4878
4879	return (0);
4880}
4881
4882static cam_status
4883ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
4884		    struct tmode_tstate **tstate, struct tmode_lstate **lstate,
4885		    int notfound_failure)
4886{
4887	u_int our_id;
4888
4889	/*
4890	 * If we are not configured for target mode, someone
4891	 * is really confused to be sending this to us.
4892	 */
4893	if ((ahc->flags & AHC_TARGETMODE) == 0)
4894		return (CAM_REQ_INVALID);
4895
4896	/* Range check target and lun */
4897
4898	/*
4899	 * Handle the 'black hole' device that sucks up
4900	 * requests to unattached luns on enabled targets.
4901	 */
4902	if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
4903	 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4904		*tstate = NULL;
4905		*lstate = ahc->black_hole;
4906	} else {
4907		u_int max_id;
4908
4909		if (cam_sim_bus(sim) == 0)
4910			our_id = ahc->our_id;
4911		else
4912			our_id = ahc->our_id_b;
4913
4914		max_id = (ahc->features & AHC_WIDE) ? 15 : 7;
4915		if (ccb->ccb_h.target_id > max_id)
4916			return (CAM_TID_INVALID);
4917
4918		if (ccb->ccb_h.target_lun > 7)
4919			return (CAM_LUN_INVALID);
4920
4921		if (ccb->ccb_h.target_id != our_id) {
4922			if ((ahc->features & AHC_MULTI_TID) != 0) {
4923				/*
4924				 * Only allow additional targets if
4925				 * the initiator role is disabled.
4926				 * The hardware cannot handle a re-select-in
4927				 * on the initiator id during a re-select-out
4928				 * on a different target id.
4929				 */
4930			   	if ((ahc->flags & AHC_INITIATORMODE) != 0)
4931					return (CAM_TID_INVALID);
4932			} else {
4933				/*
4934				 * Only allow our target id to change
4935				 * if the initiator role is not configured
4936				 * and there are no enabled luns which
4937				 * are attached to the currently registered
4938				 * scsi id.
4939				 */
4940			   	if ((ahc->flags & AHC_INITIATORMODE) != 0
4941				 || ahc->enabled_luns > 0)
4942					return (CAM_TID_INVALID);
4943			}
4944		}
4945
4946		*tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
4947		*lstate = NULL;
4948		if (*tstate != NULL)
4949			*lstate =
4950			    (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
4951	}
4952
4953	if (notfound_failure != 0 && *lstate == NULL)
4954		return (CAM_PATH_INVALID);
4955
4956	return (CAM_REQ_CMP);
4957}
4958
4959static void
4960ahc_action(struct cam_sim *sim, union ccb *ccb)
4961{
4962	struct	ahc_softc *ahc;
4963	struct	tmode_lstate *lstate;
4964	u_int	target_id;
4965	u_int	our_id;
4966	int	s;
4967
4968	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));
4969
4970	ahc = (struct ahc_softc *)cam_sim_softc(sim);
4971
4972	target_id = ccb->ccb_h.target_id;
4973	our_id = SIM_SCSI_ID(ahc, sim);
4974
4975	switch (ccb->ccb_h.func_code) {
4976	/* Common cases first */
4977	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
4978	case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
4979	{
4980		struct	   tmode_tstate *tstate;
4981		cam_status status;
4982
4983		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
4984					     &lstate, TRUE);
4985
4986		if (status != CAM_REQ_CMP) {
4987			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
4988				/* Response from the black hole device */
4989				tstate = NULL;
4990				lstate = ahc->black_hole;
4991			} else {
4992				ccb->ccb_h.status = status;
4993				xpt_done(ccb);
4994				break;
4995			}
4996		}
4997		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4998			int s;
4999
5000			s = splcam();
5001			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
5002					  sim_links.sle);
5003			ccb->ccb_h.status = CAM_REQ_INPROG;
5004			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
5005				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
5006			splx(s);
5007			break;
5008		}
5009
5010		/*
5011		 * The target_id represents the target we attempt to
5012		 * select.  In target mode, this is the initiator of
5013		 * the original command.
5014		 */
5015		our_id = target_id;
5016		target_id = ccb->csio.init_id;
5017		/* FALLTHROUGH */
5018	}
5019	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
5020	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
5021	{
5022		struct	   scb *scb;
5023		struct	   hardware_scb *hscb;
5024		struct	   ahc_initiator_tinfo *tinfo;
5025		struct	   tmode_tstate *tstate;
5026		uint16_t  mask;
5027
5028		/*
5029		 * get an scb to use.
5030		 */
5031		if ((scb = ahcgetscb(ahc)) == NULL) {
5032			int s;
5033
5034			s = splcam();
5035			ahc->flags |= AHC_RESOURCE_SHORTAGE;
5036			splx(s);
5037			xpt_freeze_simq(ahc->sim, /*count*/1);
5038			ahcsetccbstatus(ccb, CAM_REQUEUE_REQ);
5039			xpt_done(ccb);
5040			return;
5041		}
5042
5043		hscb = scb->hscb;
5044
5045		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
5046			  ("start scb(%p)\n", scb));
5047		scb->ccb = ccb;
5048		/*
5049		 * So we can find the SCB when an abort is requested
5050		 */
5051		ccb->ccb_h.ccb_scb_ptr = scb;
5052		ccb->ccb_h.ccb_ahc_ptr = ahc;
5053
5054		/*
5055		 * Put all the arguments for the xfer in the scb
5056		 */
5057		hscb->control = 0;
5058		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
5059		hscb->lun = ccb->ccb_h.target_lun;
5060		mask = SCB_GET_TARGET_MASK(ahc, scb);
5061		tinfo = ahc_fetch_transinfo(ahc, SIM_CHANNEL(ahc, sim), our_id,
5062					    target_id, &tstate);
5063
5064		hscb->scsirate = tinfo->scsirate;
5065		hscb->scsioffset = tinfo->current.offset;
5066		if ((tstate->ultraenb & mask) != 0)
5067			hscb->control |= ULTRAENB;
5068
5069		if ((tstate->discenable & mask) != 0
5070		 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
5071			hscb->control |= DISCENB;
5072
5073		if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
5074		 && (tinfo->current.width != 0 || tinfo->current.period != 0)) {
5075			scb->flags |= SCB_NEGOTIATE;
5076			hscb->control |= MK_MESSAGE;
5077		}
5078
5079		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
5080			hscb->cdb_len = 0;
5081			scb->flags |= SCB_DEVICE_RESET;
5082			hscb->control |= MK_MESSAGE;
5083			ahc_execute_scb(scb, NULL, 0, 0);
5084		} else {
5085			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
5086				struct target_data *tdata;
5087
5088				tdata = &hscb->shared_data.tdata;
5089				if (ahc->pending_device == lstate) {
5090					scb->flags |= SCB_TARGET_IMMEDIATE;
5091					ahc->pending_device = NULL;
5092				}
5093				hscb->control |= TARGET_SCB;
5094				tdata->target_phases = IDENTIFY_SEEN;
5095				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
5096					tdata->target_phases |= SPHASE_PENDING;
5097					tdata->scsi_status =
5098					    ccb->csio.scsi_status;
5099				}
5100				tdata->initiator_tag = ccb->csio.tag_id;
5101			}
5102			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
5103				hscb->control |= ccb->csio.tag_action;
5104
5105			ahc_setup_data(ahc, &ccb->csio, scb);
5106		}
5107		break;
5108	}
5109	case XPT_NOTIFY_ACK:
5110	case XPT_IMMED_NOTIFY:
5111	{
5112		struct	   tmode_tstate *tstate;
5113		struct	   tmode_lstate *lstate;
5114		cam_status status;
5115
5116		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
5117					     &lstate, TRUE);
5118
5119		if (status != CAM_REQ_CMP) {
5120			ccb->ccb_h.status = status;
5121			xpt_done(ccb);
5122			break;
5123		}
5124		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
5125				  sim_links.sle);
5126		ccb->ccb_h.status = CAM_REQ_INPROG;
5127		ahc_send_lstate_events(ahc, lstate);
5128		break;
5129	}
5130	case XPT_EN_LUN:		/* Enable LUN as a target */
5131		ahc_handle_en_lun(ahc, sim, ccb);
5132		xpt_done(ccb);
5133		break;
5134	case XPT_ABORT:			/* Abort the specified CCB */
5135	{
5136		ahc_abort_ccb(ahc, sim, ccb);
5137		break;
5138	}
5139	case XPT_SET_TRAN_SETTINGS:
5140	{
5141		struct	  ahc_devinfo devinfo;
5142		struct	  ccb_trans_settings *cts;
5143		struct	  ahc_initiator_tinfo *tinfo;
5144		struct	  tmode_tstate *tstate;
5145		uint16_t *discenable;
5146		uint16_t *tagenable;
5147		u_int	  update_type;
5148		int	  s;
5149
5150		cts = &ccb->cts;
5151		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5152				    cts->ccb_h.target_id,
5153				    cts->ccb_h.target_lun,
5154				    SIM_CHANNEL(ahc, sim),
5155				    ROLE_UNKNOWN);
5156		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
5157					    devinfo.our_scsiid,
5158					    devinfo.target, &tstate);
5159		update_type = 0;
5160		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
5161			update_type |= AHC_TRANS_GOAL;
5162			discenable = &tstate->discenable;
5163			tagenable = &tstate->tagenable;
5164		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
5165			update_type |= AHC_TRANS_USER;
5166			discenable = &ahc->user_discenable;
5167			tagenable = &ahc->user_tagenable;
5168		} else {
5169			ccb->ccb_h.status = CAM_REQ_INVALID;
5170			xpt_done(ccb);
5171			break;
5172		}
5173
5174		s = splcam();
5175
5176		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
5177			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
5178				*discenable |= devinfo.target_mask;
5179			else
5180				*discenable &= ~devinfo.target_mask;
5181		}
5182
5183		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
5184			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
5185				*tagenable |= devinfo.target_mask;
5186			else
5187				*tagenable &= ~devinfo.target_mask;
5188		}
5189
5190		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
5191			ahc_validate_width(ahc, &cts->bus_width);
5192			ahc_set_width(ahc, &devinfo, cts->ccb_h.path,
5193				      cts->bus_width, update_type,
5194				      /*paused*/FALSE);
5195		}
5196
5197		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
5198			if (update_type == AHC_TRANS_USER)
5199				cts->sync_offset = tinfo->user.offset;
5200			else
5201				cts->sync_offset = tinfo->goal.offset;
5202		}
5203
5204		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
5205			if (update_type == AHC_TRANS_USER)
5206				cts->sync_period = tinfo->user.period;
5207			else
5208				cts->sync_period = tinfo->goal.period;
5209		}
5210
5211		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
5212		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
5213			struct ahc_syncrate *syncrate;
5214			u_int ppr_options;
5215			u_int maxsync;
5216
5217			if ((ahc->features & AHC_ULTRA2) != 0)
5218				maxsync = AHC_SYNCRATE_DT;
5219			else if ((ahc->features & AHC_ULTRA) != 0)
5220				maxsync = AHC_SYNCRATE_ULTRA;
5221			else
5222				maxsync = AHC_SYNCRATE_FAST;
5223
5224			ppr_options = 0;
5225			if (cts->sync_period <= 9)
5226				ppr_options = MSG_EXT_PPR_DT_REQ;
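			/*
			 * (Period factors of 9 and below are only defined
			 * for DT transfers, so request the DT option when
			 * the caller asks for one of them.)
			 */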
5227
5228			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
5229						     &ppr_options,
5230						     maxsync);
5231			ahc_validate_offset(ahc, syncrate, &cts->sync_offset,
5232					    MSG_EXT_WDTR_BUS_8_BIT);
5233
5234			/* We use a period of 0 to represent async */
5235			if (cts->sync_offset == 0) {
5236				cts->sync_period = 0;
5237				ppr_options = 0;
5238			}
5239
5240			if (ppr_options == MSG_EXT_PPR_DT_REQ
5241			 && tinfo->user.transport_version >= 3) {
5242				tinfo->goal.transport_version =
5243				    tinfo->user.transport_version;
5244				tinfo->current.transport_version =
5245				    tinfo->user.transport_version;
5246			}
5247
5248			ahc_set_syncrate(ahc, &devinfo, cts->ccb_h.path,
5249					 syncrate, cts->sync_period,
5250					 cts->sync_offset, ppr_options,
5251					 update_type, /*paused*/FALSE);
5252		}
5253
5254		splx(s);
5255		ccb->ccb_h.status = CAM_REQ_CMP;
5256		xpt_done(ccb);
5257		break;
5258	}
5259	case XPT_GET_TRAN_SETTINGS:
5260	/* Get default/user set transfer settings for the target */
5261	{
5262		struct	ahc_devinfo devinfo;
5263		struct	ccb_trans_settings *cts;
5264		struct	ahc_initiator_tinfo *targ_info;
5265		struct	tmode_tstate *tstate;
5266		struct	ahc_transinfo *tinfo;
5267		int	s;
5268
5269		cts = &ccb->cts;
5270		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5271				    cts->ccb_h.target_id,
5272				    cts->ccb_h.target_lun,
5273				    SIM_CHANNEL(ahc, sim),
5274				    ROLE_UNKNOWN);
5275		targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
5276						devinfo.our_scsiid,
5277						devinfo.target, &tstate);
5278
5279		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
5280			tinfo = &targ_info->current;
5281		else
5282			tinfo = &targ_info->user;
5283
5284		s = splcam();
5285
5286		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
5287		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
5288			if ((ahc->user_discenable & devinfo.target_mask) != 0)
5289				cts->flags |= CCB_TRANS_DISC_ENB;
5290
5291			if ((ahc->user_tagenable & devinfo.target_mask) != 0)
5292				cts->flags |= CCB_TRANS_TAG_ENB;
5293		} else {
5294			if ((tstate->discenable & devinfo.target_mask) != 0)
5295				cts->flags |= CCB_TRANS_DISC_ENB;
5296
5297			if ((tstate->tagenable & devinfo.target_mask) != 0)
5298				cts->flags |= CCB_TRANS_TAG_ENB;
5299		}
5300		cts->sync_period = tinfo->period;
5301		cts->sync_offset = tinfo->offset;
5302		cts->bus_width = tinfo->width;
5303
5304		splx(s);
5305
5306		cts->valid = CCB_TRANS_SYNC_RATE_VALID
5307			   | CCB_TRANS_SYNC_OFFSET_VALID
5308			   | CCB_TRANS_BUS_WIDTH_VALID
5309			   | CCB_TRANS_DISC_VALID
5310			   | CCB_TRANS_TQ_VALID;
5311
5312		ccb->ccb_h.status = CAM_REQ_CMP;
5313		xpt_done(ccb);
5314		break;
5315	}
5316	case XPT_CALC_GEOMETRY:
5317	{
5318		struct	  ccb_calc_geometry *ccg;
5319		uint32_t size_mb;
5320		uint32_t secs_per_cylinder;
5321		int	  extended;
5322
5323		ccg = &ccb->ccg;
5324		size_mb = ccg->volume_size
5325			/ ((1024L * 1024L) / ccg->block_size);
5326		extended = SIM_IS_SCSIBUS_B(ahc, sim)
5327			? ahc->flags & AHC_EXTENDED_TRANS_B
5328			: ahc->flags & AHC_EXTENDED_TRANS_A;
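		/*
		 * Mirror the BIOS's translation scheme so the geometry we
		 * report matches what the BIOS would use: 255 heads and 63
		 * sectors for volumes over 1GB when extended translation is
		 * enabled, the classic 64/32 mapping otherwise.
		 */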
5329
5330		if (size_mb > 1024 && extended) {
5331			ccg->heads = 255;
5332			ccg->secs_per_track = 63;
5333		} else {
5334			ccg->heads = 64;
5335			ccg->secs_per_track = 32;
5336		}
5337		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
5338		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
5339		ccb->ccb_h.status = CAM_REQ_CMP;
5340		xpt_done(ccb);
5341		break;
5342	}
5343	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
5344	{
5345		int  found;
5346
5347		s = splcam();
5348		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
5349					  /*initiate reset*/TRUE);
5350		splx(s);
5351		if (bootverbose) {
5352			xpt_print_path(SIM_PATH(ahc, sim));
5353			printf("SCSI bus reset delivered. "
5354			       "%d SCBs aborted.\n", found);
5355		}
5356		ccb->ccb_h.status = CAM_REQ_CMP;
5357		xpt_done(ccb);
5358		break;
5359	}
5360	case XPT_TERM_IO:		/* Terminate the I/O process */
5361		/* XXX Implement */
5362		ccb->ccb_h.status = CAM_REQ_INVALID;
5363		xpt_done(ccb);
5364		break;
5365	case XPT_PATH_INQ:		/* Path routing inquiry */
5366	{
5367		struct ccb_pathinq *cpi = &ccb->cpi;
5368
5369		cpi->version_num = 1; /* XXX??? */
5370		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
5371		if ((ahc->features & AHC_WIDE) != 0)
5372			cpi->hba_inquiry |= PI_WIDE_16;
5373		if ((ahc->flags & AHC_TARGETMODE) != 0) {
5374			cpi->target_sprt = PIT_PROCESSOR
5375					 | PIT_DISCONNECT
5376					 | PIT_TERM_IO;
5377		} else {
5378			cpi->target_sprt = 0;
5379		}
5380		cpi->hba_misc = (ahc->flags & AHC_INITIATORMODE)
5381			      ? 0 : PIM_NOINITIATOR;
5382		cpi->hba_eng_cnt = 0;
5383		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
5384		cpi->max_lun = 64;
5385		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
5386			cpi->initiator_id = ahc->our_id_b;
5387			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
5388				cpi->hba_misc |= PIM_NOBUSRESET;
5389		} else {
5390			cpi->initiator_id = ahc->our_id;
5391			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
5392				cpi->hba_misc |= PIM_NOBUSRESET;
5393		}
5394		cpi->bus_id = cam_sim_bus(sim);
5395		cpi->base_transfer_speed = 3300;
5396		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5397		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
5398		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
5399		cpi->unit_number = cam_sim_unit(sim);
5400		cpi->ccb_h.status = CAM_REQ_CMP;
5401		xpt_done(ccb);
5402		break;
5403	}
5404	default:
5405		ccb->ccb_h.status = CAM_REQ_INVALID;
5406		xpt_done(ccb);
5407		break;
5408	}
5409}
5410
5411static void
5412ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
5413{
5414	struct ahc_softc *ahc;
5415	struct cam_sim *sim;
5416
5417	sim = (struct cam_sim *)callback_arg;
5418	ahc = (struct ahc_softc *)cam_sim_softc(sim);
5419	switch (code) {
5420	case AC_LOST_DEVICE:
5421	{
5422		struct	ahc_devinfo devinfo;
5423		int	s;
5424
5425		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
5426				    xpt_path_target_id(path),
5427				    xpt_path_lun_id(path),
5428				    SIM_CHANNEL(ahc, sim),
5429				    ROLE_UNKNOWN);
5430
5431		/*
5432		 * Revert to async/narrow transfers
5433		 * for the next device.
5434		 */
5435		s = splcam();
5436		ahc_set_width(ahc, &devinfo, path, MSG_EXT_WDTR_BUS_8_BIT,
5437			      AHC_TRANS_GOAL|AHC_TRANS_CUR,
5438			      /*paused*/FALSE);
5439		ahc_set_syncrate(ahc, &devinfo, path, /*syncrate*/NULL,
5440				 /*period*/0, /*offset*/0, /*ppr_options*/0,
5441				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
5442				 /*paused*/FALSE);
5443		splx(s);
5444		break;
5445	}
5446	default:
5447		break;
5448	}
5449}
5450
5451static void
5452ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
5453		int error)
5454{
5455	struct	 scb *scb;
5456	union	 ccb *ccb;
5457	struct	 ahc_softc *ahc;
5458	int	 s;
5459
5460	scb = (struct scb *)arg;
5461	ccb = scb->ccb;
5462	ahc = (struct ahc_softc *)ccb->ccb_h.ccb_ahc_ptr;
5463
5464	if (error != 0) {
5465		if (error == EFBIG)
5466			ahcsetccbstatus(scb->ccb, CAM_REQ_TOO_BIG);
5467		else
5468			ahcsetccbstatus(scb->ccb, CAM_REQ_CMP_ERR);
5469		if (nsegments != 0)
5470			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
5471		ahcfreescb(ahc, scb);
5472		xpt_done(ccb);
5473		return;
5474	}
5475	if (nsegments != 0) {
5476		struct	  ahc_dma_seg *sg;
5477		bus_dma_segment_t *end_seg;
5478		bus_dmasync_op_t op;
5479
5480		end_seg = dm_segs + nsegments;
5481
5482		/* Copy the segments into our SG list */
5483		sg = scb->sg_list;
5484		while (dm_segs < end_seg) {
5485			sg->addr = dm_segs->ds_addr;
5486			/* XXX Add in the 5th byte of the address later. */
5487			sg->len = dm_segs->ds_len;
5488			sg++;
5489			dm_segs++;
5490		}
5491
5492		/*
5493		 * Note where to find the SG entries in bus space.
5494		 * We also set the full residual flag which the
5495		 * sequencer will clear as soon as a data transfer
5496		 * occurs.
5497		 */
5498		scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
5499
5500		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
5501			op = BUS_DMASYNC_PREREAD;
5502		else
5503			op = BUS_DMASYNC_PREWRITE;
5504
5505		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
5506
5507		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
5508			struct target_data *tdata;
5509
5510			tdata = &scb->hscb->shared_data.tdata;
5511			tdata->target_phases |= DPHASE_PENDING;
5512			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
5513				tdata->data_phase = P_DATAOUT;
5514			else
5515				tdata->data_phase = P_DATAIN;
5516
5517			/*
5518			 * If the transfer is of an odd length and in the
5519			 * "in" direction (scsi->HostBus), then it may
5520			 * trigger a bug in the 'WideODD' feature of
5521			 * non-Ultra2 chips.  Force the total data-length
5522			 * to be even by adding an extra 1 byte SG element.
5523			 * We do this even if we are not currently
5524			 * negotiated wide as negotiation could occur before
5525			 * this command is executed.
5526			 */
5527			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
5528			 && (ccb->csio.dxfer_len & 0x1) != 0
5529			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
5530
5531				nsegments++;
5532				if (nsegments > AHC_NSEG) {
5533
5534					ahcsetccbstatus(scb->ccb,
5535							CAM_REQ_TOO_BIG);
5536					bus_dmamap_unload(ahc->buffer_dmat,
5537							  scb->dmamap);
5538					ahcfreescb(ahc, scb);
5539					xpt_done(ccb);
5540					return;
5541				}
5542				sg->addr = ahc->dma_bug_buf;
5543				sg->len = 1;
5544				sg++;
5545			}
5546		}
5547		sg--;
5548		sg->len |= AHC_DMA_LAST_SEG;
5549
5550		/* Copy the first SG into the "current" data pointer area */
5551		scb->hscb->dataptr = scb->sg_list->addr;
5552		scb->hscb->datacnt = scb->sg_list->len;
5553	} else {
5554		scb->hscb->sgptr = SG_LIST_NULL;
5555		scb->hscb->dataptr = 0;
5556		scb->hscb->datacnt = 0;
5557	}
5558
5559	scb->sg_count = nsegments;
5560
5561	s = splcam();
5562
5563	/*
5564	 * Last chance to check whether this SCB needs to
5565	 * be aborted before we commit it to the hardware.
5566	 */
5567	if (ahc_ccb_status(ccb) != CAM_REQ_INPROG) {
5568		if (nsegments != 0)
5569			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
5570		ahcfreescb(ahc, scb);
5571		xpt_done(ccb);
5572		splx(s);
5573		return;
5574	}
5575
5576	LIST_INSERT_HEAD(&ahc->pending_ccbs, &ccb->ccb_h,
5577			 sim_links.le);
5578
5579	ccb->ccb_h.status |= CAM_SIM_QUEUED;
5580
5581	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
5582		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
5583			ccb->ccb_h.timeout = 5 * 1000;
5584		ccb->ccb_h.timeout_ch =
5585		    timeout(ahc_timeout, (caddr_t)scb,
5586			    (ccb->ccb_h.timeout * hz) / 1000);
5587	}
5588
5589	/*
5590	 * We only allow one untagged transaction
5591	 * per target in the initiator role unless
5592	 * we are storing a full busy target *lun*
5593	 * table in SCB space.
5594	 */
5595	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
5596	 && (ahc->features & AHC_SCB_BTT) == 0) {
5597		struct scb_tailq *untagged_q;
5598
5599		untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
5600		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
5601		if (TAILQ_FIRST(untagged_q) != scb) {
5602			splx(s);
5603			return;
5604		}
5605	}
5606	scb->flags |= SCB_ACTIVE;
5607
5608	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
5609#if 0
5610		printf("Continuing Immediate Command %d:%d\n",
5611		       ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
5612#endif
5613		pause_sequencer(ahc);
5614		if ((ahc->flags & AHC_PAGESCBS) == 0)
5615			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
5616		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
5617		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
5618		unpause_sequencer(ahc);
5619	} else {
5620		ahc_queue_scb(ahc, scb);
5621	}
5622
5623	splx(s);
5624}
5625
5626static void
5627ahc_poll(struct cam_sim *sim)
5628{
5629	ahc_intr(cam_sim_softc(sim));
5630}
5631
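/*
 * Stuff the CDB into the hardware SCB and map the request's data for
 * DMA.  Virtual buffers are loaded through the busdma system and
 * complete asynchronously via ahc_execute_scb(); physical buffers and
 * pre-built S/G lists are handed to ahc_execute_scb() directly.
 */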
5632static void
5633ahc_setup_data(struct ahc_softc *ahc, struct ccb_scsiio *csio,
5634	       struct scb *scb)
5635{
5636	struct hardware_scb *hscb;
5637	struct ccb_hdr *ccb_h;
5638
5639	hscb = scb->hscb;
5640	ccb_h = &csio->ccb_h;
5641
5642	if (ccb_h->func_code == XPT_SCSI_IO) {
5643		hscb->cdb_len = csio->cdb_len;
5644		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
5645
5646			if (hscb->cdb_len > sizeof(hscb->cdb32)
5647			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
5648				ahcsetccbstatus(scb->ccb, CAM_REQ_INVALID);
5649				xpt_done(scb->ccb);
5650				ahcfreescb(ahc, scb);
5651				return;
5652			}
5653			if (hscb->cdb_len > 12) {
5654				memcpy(hscb->cdb32,
5655				       csio->cdb_io.cdb_ptr,
5656				       hscb->cdb_len);
5657				if ((ahc->flags & AHC_CMD_CHAN) == 0) {
5658					hscb->shared_data.cdb_ptr =
5659					    scb->cdb32_busaddr;
5660				}
5661			} else {
5662				memcpy(hscb->shared_data.cdb,
5663				       csio->cdb_io.cdb_ptr,
5664				       hscb->cdb_len);
5665			}
5666		} else {
5667			if (hscb->cdb_len > 12) {
5668				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
5669				       hscb->cdb_len);
5670				if ((ahc->flags & AHC_CMD_CHAN) == 0) {
5671					hscb->shared_data.cdb_ptr =
5672					    scb->cdb32_busaddr;
5673				}
5674			} else {
5675				memcpy(hscb->shared_data.cdb,
5676				       csio->cdb_io.cdb_bytes,
5677				       hscb->cdb_len);
5678			}
5679		}
5680	}
5681
5682	/* Only use S/G if there is a transfer */
5683	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
5684		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
5685			/* We've been given a pointer to a single buffer */
5686			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
5687				int s;
5688				int error;
5689
5690				s = splsoftvm();
5691				error = bus_dmamap_load(ahc->buffer_dmat,
5692							scb->dmamap,
5693							csio->data_ptr,
5694							csio->dxfer_len,
5695							ahc_execute_scb,
5696							scb, /*flags*/0);
5697				if (error == EINPROGRESS) {
5698					/*
5699					 * So as to maintain ordering,
5700					 * freeze the controller queue
5701					 * until our mapping is
5702					 * returned.
5703					 */
5704					xpt_freeze_simq(ahc->sim,
5705							/*count*/1);
5706					scb->ccb->ccb_h.status |=
5707					    CAM_RELEASE_SIMQ;
5708				}
5709				splx(s);
5710			} else {
5711				struct bus_dma_segment seg;
5712
5713				/* Pointer to physical buffer */
5714				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
5715					panic("ahc_setup_data - Transfer size "
5716					      "larger than the device max");
5717
5718				seg.ds_addr = (intptr_t)csio->data_ptr;
5719				seg.ds_len = csio->dxfer_len;
5720				ahc_execute_scb(scb, &seg, 1, 0);
5721			}
5722		} else {
5723			struct bus_dma_segment *segs;
5724
5725			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
5726				panic("ahc_setup_data - Physical segment "
5727				      "pointers unsupported");
5728
5729			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
5730				panic("ahc_setup_data - Virtual segment "
5731				      "addresses unsupported");
5732
5733			/* Just use the segments provided */
5734			segs = (struct bus_dma_segment *)csio->data_ptr;
5735			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
5736		}
5737	} else {
5738		ahc_execute_scb(scb, NULL, 0, 0);
5739	}
5740}
5741
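/*
 * Return all qinfifo entries destined for the device named by the given
 * path back to CAM with CAM_REQUEUE_REQ status, freezing their CCBs so
 * the requests are retried once the device queue is released.
 */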
5742static void
5743ahc_freeze_devq(struct ahc_softc *ahc, struct cam_path *path)
5744{
5745	int	target;
5746	char	channel;
5747	int	lun;
5748
5749	target = xpt_path_target_id(path);
5750	lun = xpt_path_lun_id(path);
5751	channel = xpt_path_sim(path)->bus_id == 0 ? 'A' : 'B';
5752
5753	ahc_search_qinfifo(ahc, target, channel, lun,
5754			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5755			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5756}
5757
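/*
 * Grow the SCB pool.  Allocate another page of S/G list space and carve
 * it into as many new SCBs as it will hold (up to AHC_SCB_MAX total),
 * placing each new SCB on the free list.
 */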
5758static void
5759ahcallocscbs(struct ahc_softc *ahc)
5760{
5761	struct scb_data *scb_data;
5762	struct scb *next_scb;
5763	struct sg_map_node *sg_map;
5764	bus_addr_t physaddr;
5765	struct ahc_dma_seg *segs;
5766	int newcount;
5767	int i;
5768
5769	scb_data = ahc->scb_data;
5770	if (scb_data->numscbs >= AHC_SCB_MAX)
5771		/* Can't allocate any more */
5772		return;
5773
5774	next_scb = &scb_data->scbarray[scb_data->numscbs];
5775
5776	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
5777
5778	if (sg_map == NULL)
5779		return;
5780
5781	/* Allocate S/G space for the next batch of SCBS */
5782	if (bus_dmamem_alloc(scb_data->sg_dmat, (void **)&sg_map->sg_vaddr,
5783			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
5784		free(sg_map, M_DEVBUF);
5785		return;
5786	}
5787
5788	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
5789
5790	bus_dmamap_load(scb_data->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
5791			PAGE_SIZE, ahcdmamapcb, &sg_map->sg_physaddr,
5792			/*flags*/0);
5793
5794	segs = sg_map->sg_vaddr;
5795	physaddr = sg_map->sg_physaddr;
5796
5797	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
5798	for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) {
5799		int error;
5800
5801		next_scb->sg_list = segs;
5802		/*
5803		 * The sequencer always starts with the second entry.
5804		 * The first entry is embedded in the scb.
5805		 */
5806		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
5807		next_scb->flags = SCB_FREE;
5808		error = bus_dmamap_create(ahc->buffer_dmat, /*flags*/0,
5809					  &next_scb->dmamap);
5810		if (error != 0)
5811			break;
5812		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
5813		next_scb->hscb->tag = ahc->scb_data->numscbs;
5814		next_scb->cdb32_busaddr =
5815		    ahc_hscb_busaddr(ahc, next_scb->hscb->tag)
5816		  + offsetof(struct hardware_scb, cdb32);
5817		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
5818				  next_scb, links.sle);
5819		segs += AHC_NSEG;
5820		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
5821		next_scb++;
5822		ahc->scb_data->numscbs++;
5823	}
5824}
5825
5826#ifdef AHC_DUMP_SEQ
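/*
 * Debugging aid: place the sequencer in LOADRAM mode and print every
 * instruction word currently stored in SEQRAM.
 */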
5827static void
5828ahc_dumpseq(struct ahc_softc* ahc)
5829{
5830	int i;
5831	int max_prog;
5832
5833	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
5834		max_prog = 448;
5835	else if ((ahc->features & AHC_ULTRA2) != 0)
5836		max_prog = 768;
5837	else
5838		max_prog = 512;
5839
5840	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5841	ahc_outb(ahc, SEQADDR0, 0);
5842	ahc_outb(ahc, SEQADDR1, 0);
5843	for (i = 0; i < max_prog; i++) {
5844		uint8_t ins_bytes[4];
5845
5846		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
5847		printf("0x%08x\n", ins_bytes[0] << 24
5848				 | ins_bytes[1] << 16
5849				 | ins_bytes[2] << 8
5850				 | ins_bytes[3]);
5851	}
5852}
5853#endif
5854
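/*
 * Download the sequencer program into SEQRAM, skipping instructions
 * that belong to patches which do not apply to this controller and
 * filling in the downloadable constants table.
 */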
5855static void
5856ahc_loadseq(struct ahc_softc *ahc)
5857{
5858	struct patch *cur_patch;
5859	u_int i;
5860	int downloaded;
5861	u_int skip_addr;
5862	uint8_t download_consts[2];
5863
5864	/* Setup downloadable constant table */
5865	download_consts[QOUTFIFO_OFFSET] = 0;
5866	if (ahc->targetcmds != NULL)
5867		download_consts[QOUTFIFO_OFFSET] += 32;
5868	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
5869
5870	cur_patch = patches;
5871	downloaded = 0;
5872	skip_addr = 0;
5873	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
5874	ahc_outb(ahc, SEQADDR0, 0);
5875	ahc_outb(ahc, SEQADDR1, 0);
5876
5877	for (i = 0; i < sizeof(seqprog)/4; i++) {
5878		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
5879			/*
5880			 * Don't download this instruction as it
5881			 * is in a patch that was removed.
5882			 */
5883			continue;
5884		}
5885		ahc_download_instr(ahc, i, download_consts);
5886		downloaded++;
5887	}
5888	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
5889	restart_sequencer(ahc);
5890
5891	if (bootverbose)
5892		printf(" %d instructions downloaded\n", downloaded);
5893}
5894
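/*
 * Decide whether the instruction at start_instr should be downloaded.
 * Returns 0 while inside a patch region that was rejected for this
 * controller and 1 otherwise.  *start_patch and *skip_addr are advanced
 * so the caller can scan the program sequentially.
 */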
5895static int
5896ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
5897		u_int start_instr, u_int *skip_addr)
5898{
5899	struct	patch *cur_patch;
5900	struct	patch *last_patch;
5901	u_int	num_patches;
5902
5903	num_patches = sizeof(patches)/sizeof(struct patch);
5904	last_patch = &patches[num_patches];
5905	cur_patch = *start_patch;
5906
5907	while (cur_patch < last_patch && start_instr == cur_patch->begin) {
5908
5909		if (cur_patch->patch_func(ahc) == 0) {
5910
5911			/* Start rejecting code */
5912			*skip_addr = start_instr + cur_patch->skip_instr;
5913			cur_patch += cur_patch->skip_patch;
5914		} else {
5915			/* Accepted this patch.  Advance to the next
5916			 * one and wait for our instruction pointer to
5917			 * hit this point.
5918			 */
5919			cur_patch++;
5920		}
5921	}
5922
5923	*start_patch = cur_patch;
5924	if (start_instr < *skip_addr)
5925		/* Still skipping */
5926		return (0);
5927
5928	return (1);
5929}
5930
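/*
 * Download a single sequencer instruction: relocate branch targets to
 * account for instructions removed by rejected patches, substitute the
 * downloadable constants, add the parity bit (Ultra2) or repack the
 * instruction format (older chips), and write the result to SEQRAM.
 */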
5931static void
5932ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
5933{
5934	union	ins_formats instr;
5935	struct	ins_format1 *fmt1_ins;
5936	struct	ins_format3 *fmt3_ins;
5937	u_int	opcode;
5938
5939	/* Structure copy */
5940	instr = *(union ins_formats*)&seqprog[instrptr * 4];
5941
5942#if BYTE_ORDER == BIG_ENDIAN
5943	opcode = instr.format.bytes[0];
5944	instr.format.bytes[0] = instr.format.bytes[3];
5945	instr.format.bytes[3] = opcode;
5946	opcode = instr.format.bytes[1];
5947	instr.format.bytes[1] = instr.format.bytes[2];
5948	instr.format.bytes[2] = opcode;
5949#endif
5950
5951	fmt1_ins = &instr.format1;
5952	fmt3_ins = NULL;
5953
5954	/* Pull the opcode */
5955	opcode = instr.format1.opcode;
5956	switch (opcode) {
5957	case AIC_OP_JMP:
5958	case AIC_OP_JC:
5959	case AIC_OP_JNC:
5960	case AIC_OP_CALL:
5961	case AIC_OP_JNE:
5962	case AIC_OP_JNZ:
5963	case AIC_OP_JE:
5964	case AIC_OP_JZ:
5965	{
5966		struct patch *cur_patch;
5967		int address_offset;
5968		u_int address;
5969		u_int skip_addr;
5970		u_int i;
5971
5972		fmt3_ins = &instr.format3;
5973		address_offset = 0;
5974		address = fmt3_ins->address;
5975		cur_patch = patches;
5976		skip_addr = 0;
5977
5978		for (i = 0; i < address;) {
5979
5980			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
5981
5982			if (skip_addr > i) {
5983				int end_addr;
5984
5985				end_addr = MIN(address, skip_addr);
5986				address_offset += end_addr - i;
5987				i = skip_addr;
5988			} else {
5989				i++;
5990			}
5991		}
5992		address -= address_offset;
5993		fmt3_ins->address = address;
5994		/* FALLTHROUGH */
5995	}
5996	case AIC_OP_OR:
5997	case AIC_OP_AND:
5998	case AIC_OP_XOR:
5999	case AIC_OP_ADD:
6000	case AIC_OP_ADC:
6001	case AIC_OP_BMOV:
6002		if (fmt1_ins->parity != 0) {
6003			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6004		}
6005		fmt1_ins->parity = 0;
6006		/* FALLTHROUGH */
6007	case AIC_OP_ROL:
6008		if ((ahc->features & AHC_ULTRA2) != 0) {
6009			int i, count;
6010
6011			/* Calculate odd parity for the instruction */
6012			for (i = 0, count = 0; i < 31; i++) {
6013				uint32_t mask;
6014
6015				mask = 0x01 << i;
6016				if ((instr.integer & mask) != 0)
6017					count++;
6018			}
6019			if ((count & 0x01) == 0)
6020				instr.format1.parity = 1;
6021		} else {
6022			/* Compress the instruction for older sequencers */
6023			if (fmt3_ins != NULL) {
6024				instr.integer =
6025					fmt3_ins->immediate
6026				      | (fmt3_ins->source << 8)
6027				      | (fmt3_ins->address << 16)
6028				      |	(fmt3_ins->opcode << 25);
6029			} else {
6030				instr.integer =
6031					fmt1_ins->immediate
6032				      | (fmt1_ins->source << 8)
6033				      | (fmt1_ins->destination << 16)
6034				      |	(fmt1_ins->ret << 24)
6035				      |	(fmt1_ins->opcode << 25);
6036			}
6037		}
6038#if BYTE_ORDER == BIG_ENDIAN
6039		opcode = instr.format.bytes[0];
6040		instr.format.bytes[0] = instr.format.bytes[3];
6041		instr.format.bytes[3] = opcode;
6042		opcode = instr.format.bytes[1];
6043		instr.format.bytes[1] = instr.format.bytes[2];
6044		instr.format.bytes[2] = opcode;
6045#endif
6046		ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6047		break;
6048	default:
6049		panic("Unknown opcode encountered in seq program");
6050		break;
6051	}
6052}
6053
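/*
 * Mark an SCB as the recovery SCB.  The SIM queue is frozen so that no
 * new requests arrive, and the timeouts of all pending CCBs are
 * cancelled until error recovery completes.
 */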
6054static void
6055ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
6056
6057	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
6058		struct ccb_hdr *ccbh;
6059
6060		scb->flags |= SCB_RECOVERY_SCB;
6061
6062		/*
6063		 * Take all queued, but not sent SCBs out of the equation.
6064		 * Also ensure that no new CCBs are queued to us while we
6065		 * try to fix this problem.
6066		 */
6067		if ((scb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
6068			xpt_freeze_simq(ahc->sim, /*count*/1);
6069			scb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
6070		}
6071
6072		/*
6073		 * Go through all of our pending SCBs and remove
6074		 * any scheduled timeouts for them.  We will reschedule
6075		 * them after we've successfully fixed this problem.
6076		 */
6077		ccbh = ahc->pending_ccbs.lh_first;
6078		while (ccbh != NULL) {
6079			struct scb *pending_scb;
6080
6081			pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
6082			untimeout(ahc_timeout, pending_scb, ccbh->timeout_ch);
6083			ccbh = ccbh->sim_links.le.le_next;
6084		}
6085	}
6086}
6087
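/*
 * Handle the expiration of a command's timeout.  If another device
 * appears to be monopolizing the bus we simply extend this command's
 * timeout.  Otherwise we attempt a bus device reset against the
 * offending target and escalate to a full channel reset if that
 * attempt also times out.
 */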
6088static void
6089ahc_timeout(void *arg)
6090{
6091	struct	scb *scb;
6092	struct	ahc_softc *ahc;
6093	int	s, found;
6094	u_int	last_phase;
6095	int	target;
6096	int	lun;
6097	int	i;
6098	char	channel;
6099
6100	scb = (struct scb *)arg;
6101	ahc = (struct ahc_softc *)scb->ccb->ccb_h.ccb_ahc_ptr;
6102
6103	s = splcam();
6104
6105	/*
6106	 * Ensure that the card doesn't do anything
6107	 * behind our back.  Also make sure that we
6108	 * didn't "just" miss an interrupt that would
6109	 * affect this timeout.
6110	 */
6111	do {
6112		ahc_intr(ahc);
6113		pause_sequencer(ahc);
6114	} while (ahc_inb(ahc, INTSTAT) & INT_PEND);
6115
6116	xpt_print_path(scb->ccb->ccb_h.path);
6117	if ((scb->flags & SCB_ACTIVE) == 0) {
6118		/* Previous timeout took care of me already */
6119		printf("Timedout SCB %d handled by another timeout\n",
6120		       scb->hscb->tag);
6121		unpause_sequencer(ahc);
6122		splx(s);
6123		return;
6124	}
6125
6126	target = SCB_GET_TARGET(ahc, scb);
6127	channel = SCB_GET_CHANNEL(ahc, scb);
6128	lun = SCB_GET_LUN(scb);
6129
6130	printf("SCB 0x%x - timed out ", scb->hscb->tag);
6131	/*
6132	 * Take a snapshot of the bus state and print out
6133	 * some information so we can track down driver bugs.
6134	 */
6135	last_phase = ahc_inb(ahc, LASTPHASE);
6136
6137	for (i = 0; i < num_phases; i++) {
6138		if (last_phase == phase_table[i].phase)
6139			break;
6140	}
6141	printf("%s", phase_table[i].phasemsg);
6142
6143	printf(", SEQADDR == 0x%x\n",
6144	       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6145
6146#if 0
6147	printf("SSTAT1 == 0x%x\n", ahc_inb(ahc, SSTAT1));
6148	printf("SSTAT3 == 0x%x\n", ahc_inb(ahc, SSTAT3));
6149	printf("SCSIPHASE == 0x%x\n", ahc_inb(ahc, SCSIPHASE));
6150	printf("SCSIRATE == 0x%x\n", ahc_inb(ahc, SCSIRATE));
6151	printf("SCSIOFFSET == 0x%x\n", ahc_inb(ahc, SCSIOFFSET));
6152	printf("SEQ_FLAGS == 0x%x\n", ahc_inb(ahc, SEQ_FLAGS));
6153	printf("SCB_DATAPTR == 0x%x\n", ahc_inb(ahc, SCB_DATAPTR)
6154				      | ahc_inb(ahc, SCB_DATAPTR + 1) << 8
6155				      | ahc_inb(ahc, SCB_DATAPTR + 2) << 16
6156				      | ahc_inb(ahc, SCB_DATAPTR + 3) << 24);
6157	printf("SCB_DATACNT == 0x%x\n", ahc_inb(ahc, SCB_DATACNT)
6158				      | ahc_inb(ahc, SCB_DATACNT + 1) << 8
6159				      | ahc_inb(ahc, SCB_DATACNT + 2) << 16);
6160	printf("SCB_SGCOUNT == 0x%x\n", ahc_inb(ahc, SCB_SGCOUNT));
6161	printf("CCSCBCTL == 0x%x\n", ahc_inb(ahc, CCSCBCTL));
6162	printf("CCSCBCNT == 0x%x\n", ahc_inb(ahc, CCSCBCNT));
6163	printf("DFCNTRL == 0x%x\n", ahc_inb(ahc, DFCNTRL));
6164	printf("DFSTATUS == 0x%x\n", ahc_inb(ahc, DFSTATUS));
6165	printf("CCHCNT == 0x%x\n", ahc_inb(ahc, CCHCNT));
6166	if (scb->sg_count > 0) {
6167		for (i = 0; i < scb->sg_count; i++) {
6168			printf("sg[%d] - Addr 0x%x : Length %d\n",
6169			       i,
6170			       scb->sg_list[i].addr,
6171			       scb->sg_list[i].len);
6172		}
6173	}
6174#endif
6175	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
6176		/*
6177		 * Been down this road before.
6178		 * Do a full bus reset.
6179		 */
6180bus_reset:
6181		ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT);
6182		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
6183		printf("%s: Issued Channel %c Bus Reset. "
6184		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
6185	} else {
6186		/*
6187		 * If we are a target, transition to bus free and report
6188		 * the timeout.
6189		 *
6190		 * The target/initiator that is holding up the bus may not
6191		 * be the same as the one that triggered this timeout
6192		 * (different commands have different timeout lengths).
6193		 * If the bus is idle and we are acting as the initiator
6194		 * for this request, queue a BDR message to the timed out
6195		 * target.  Otherwise, if the timed out transaction is
6196		 * active:
6197		 *   Initiator transaction:
6198		 *	Stuff the message buffer with a BDR message and assert
6199		 *	ATN in the hopes that the target will let go of the bus
6200		 *	and go to the mesgout phase.  If this fails, we'll
6201		 *	get another timeout 2 seconds later which will attempt
6202		 *	a bus reset.
6203		 *
6204		 *   Target transaction:
6205		 *	Transition to BUS FREE and report the error.
6206		 *	It's good to be the target!
6207		 */
6208		u_int active_scb_index;
6209
6210		active_scb_index = ahc_inb(ahc, SCB_TAG);
6211
6212		if (last_phase != P_BUSFREE
6213		  && (active_scb_index < ahc->scb_data->numscbs)) {
6214			struct scb *active_scb;
6215
6216			/*
6217			 * If the active SCB is not from our device,
6218			 * assume that another device is hogging the bus
6219			 * and wait for its timeout to expire before
6220			 * taking additional action.
6221			 */
6222			active_scb = &ahc->scb_data->scbarray[active_scb_index];
6223			if (active_scb->hscb->scsiid != scb->hscb->scsiid
6224			 || active_scb->hscb->lun != scb->hscb->lun) {
6225				struct	ccb_hdr *ccbh;
6226				u_int	newtimeout;
6227
6228				xpt_print_path(scb->ccb->ccb_h.path);
6229				printf("Other SCB Timeout%s",
6230			 	       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
6231				       ? " again\n" : "\n");
6232				scb->flags |= SCB_OTHERTCL_TIMEOUT;
6233				newtimeout = MAX(active_scb->ccb->ccb_h.timeout,
6234						 scb->ccb->ccb_h.timeout);
6235				ccbh = &scb->ccb->ccb_h;
6236				scb->ccb->ccb_h.timeout_ch =
6237				    timeout(ahc_timeout, scb,
6238					    (newtimeout * hz) / 1000);
6239				splx(s);
6240				return;
6241			}
6242
6243			/* It's us */
6244			if ((scb->hscb->control & TARGET_SCB) != 0) {
6245
6246				/*
6247				 * Send back any queued up transactions
6248				 * and properly record the error condition.
6249				 */
6250				ahc_freeze_devq(ahc, scb->ccb->ccb_h.path);
6251				ahcsetccbstatus(scb->ccb, CAM_CMD_TIMEOUT);
6252				ahc_freeze_ccb(scb->ccb);
6253				ahc_done(ahc, scb);
6254
6255				/* Will clear us from the bus */
6256				restart_sequencer(ahc);
				splx(s);
6257				return;
6258			}
6259
6260			ahc_set_recoveryscb(ahc, active_scb);
6261			ahc_outb(ahc, MSG_OUT, MSG_BUS_DEV_RESET);
6262			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
6263			xpt_print_path(active_scb->ccb->ccb_h.path);
6264			printf("BDR message in message buffer\n");
6265			active_scb->flags |= SCB_DEVICE_RESET;
6266			active_scb->ccb->ccb_h.timeout_ch =
6267			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
6268			unpause_sequencer(ahc);
6269		} else {
6270			int	 disconnected;
6271
6272			/* XXX Shouldn't panic.  Just punt instead */
6273			if ((scb->hscb->control & TARGET_SCB) != 0)
6274				panic("Timed-out target SCB but bus idle");
6275
6276			if (last_phase != P_BUSFREE
6277			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
6278				/* XXX What happened to the SCB? */
6279				/* Hung target selection.  Goto busfree */
6280				printf("%s: Hung target selection\n",
6281				       ahc_name(ahc));
6282				restart_sequencer(ahc);
				splx(s);
6283				return;
6284			}
6285
6286			if (ahc_search_qinfifo(ahc, target, channel, lun,
6287					       scb->hscb->tag, ROLE_INITIATOR,
6288					       /*status*/0, SEARCH_COUNT) > 0) {
6289				disconnected = FALSE;
6290			} else {
6291				disconnected = TRUE;
6292			}
6293
6294			if (disconnected) {
6295				u_int active_scb;
6296
6297				ahc_set_recoveryscb(ahc, scb);
6298				/*
6299				 * Simply set the MK_MESSAGE control bit.
6300				 */
6301				scb->hscb->control |= MK_MESSAGE;
6302				scb->flags |= SCB_QUEUED_MSG
6303					   |  SCB_DEVICE_RESET;
6304
6305				/*
6306				 * Mark the cached copy of this SCB in the
6307				 * disconnected list too, so that a reconnect
6308				 * at this point causes a BDR or abort.
6309				 */
6310				active_scb = ahc_inb(ahc, SCBPTR);
6311				if (ahc_search_disc_list(ahc, target,
6312							 channel, lun,
6313							 scb->hscb->tag,
6314							 /*stop_on_first*/TRUE,
6315							 /*remove*/FALSE,
6316							 /*save_state*/FALSE)) {
6317					u_int scb_control;
6318
6319					scb_control = ahc_inb(ahc, SCB_CONTROL);
6320					scb_control |= MK_MESSAGE;
6321					ahc_outb(ahc, SCB_CONTROL, scb_control);
6322				}
6323				ahc_outb(ahc, SCBPTR, active_scb);
6324
6325				/*
6326				 * Actually re-queue this SCB in case we can
6327				 * select the device before it reconnects.
6328				 * Clear out any entries in the QINFIFO first
6329				 * so we are the next SCB for this target
6330				 * to run.
6331				 */
6332				ahc_search_qinfifo(ahc,
6333						   SCB_GET_TARGET(ahc, scb),
6334						   channel, SCB_GET_LUN(scb),
6335						   SCB_LIST_NULL,
6336						   ROLE_INITIATOR,
6337						   CAM_REQUEUE_REQ,
6338						   SEARCH_COMPLETE);
6339				xpt_print_path(scb->ccb->ccb_h.path);
6340				printf("Queuing a BDR SCB\n");
6341				ahc->qinfifo[ahc->qinfifonext++] =
6342				    scb->hscb->tag;
6343				if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6344					ahc_outb(ahc, HNSCB_QOFF,
6345						 ahc->qinfifonext);
6346				} else {
6347					ahc_outb(ahc, KERNEL_QINPOS,
6348						 ahc->qinfifonext);
6349				}
6350				scb->ccb->ccb_h.timeout_ch =
6351				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
6352				unpause_sequencer(ahc);
6353			} else {
6354				/* Go "immediately" to the bus reset */
6355				/* This shouldn't happen */
6356				ahc_set_recoveryscb(ahc, scb);
6357				xpt_print_path(scb->ccb->ccb_h.path);
6358				printf("SCB %d: Immediate reset.  "
6359					"Flags = 0x%x\n", scb->hscb->tag,
6360					scb->flags);
6361				goto bus_reset;
6362			}
6363		}
6364	}
6365	splx(s);
6366}
6367
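/*
 * Search the qinfifo and the waiting-for-selection list for SCBs
 * matching the given target/channel/lun/tag and role.  Depending on
 * the requested action, matches are counted, removed, or completed
 * with the supplied status.  Returns the number of matches found.
 */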
6368static int
6369ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
6370		   int lun, u_int tag, role_t role, uint32_t status,
6371		   ahc_search_action action)
6372{
6373	struct	 scb *scbp;
6374	uint8_t qinpos;
6375	uint8_t qintail;
6376	uint8_t next, prev;
6377	uint8_t curscbptr;
6378	int	 found;
6379
6380	qinpos = ahc_inb(ahc, QINPOS);
6381	qintail = ahc->qinfifonext;
6382	found = 0;
6383
6384	if (action == SEARCH_COMPLETE) {
6385		/*
6386		 * Don't attempt to run any queued untagged transactions
6387		 * until we are done with the abort process.
6388		 */
6389		ahc_freeze_untagged_queues(ahc);
6390	}
6391
6392	/*
6393	 * Start with an empty queue.  Entries that are not chosen
6394	 * for removal will be re-added to the queue as we go.
6395	 */
6396	ahc->qinfifonext = qinpos;
6397
6398	while (qinpos != qintail) {
6399		scbp = &ahc->scb_data->scbarray[ahc->qinfifo[qinpos]];
6400		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
6401			/*
6402			 * We found an scb that needs to be acted on.
6403			 */
6404			found++;
6405			switch (action) {
6406			case SEARCH_COMPLETE:
6407				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6408					ahcsetccbstatus(scbp->ccb, status);
6409				ahc_freeze_ccb(scbp->ccb);
6410				ahc_done(ahc, scbp);
6411				break;
6412			case SEARCH_COUNT:
6413				ahc->qinfifo[ahc->qinfifonext++] =
6414				    scbp->hscb->tag;
6415				break;
6416			case SEARCH_REMOVE:
6417				break;
6418			}
6419		} else {
6420			ahc->qinfifo[ahc->qinfifonext++] = scbp->hscb->tag;
6421		}
6422		qinpos++;
6423	}
6424
6425	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6426		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
6427	} else {
6428		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
6429	}
6430
6431	/*
6432	 * Search waiting for selection list.
6433	 */
6434	curscbptr = ahc_inb(ahc, SCBPTR);
6435	next = ahc_inb(ahc, WAITING_SCBH);  /* Start at head of list. */
6436	prev = SCB_LIST_NULL;
6437
6438	while (next != SCB_LIST_NULL) {
6439		uint8_t scb_index;
6440
6441		ahc_outb(ahc, SCBPTR, next);
6442		scb_index = ahc_inb(ahc, SCB_TAG);
6443		if (scb_index >= ahc->scb_data->numscbs) {
6444			panic("Waiting List inconsistency. "
6445			      "SCB index == %d, yet numscbs == %d.",
6446			      scb_index, ahc->scb_data->numscbs);
6447		}
6448		scbp = &ahc->scb_data->scbarray[scb_index];
6449		if (ahc_match_scb(ahc, scbp, target, channel,
6450				  lun, SCB_LIST_NULL, role)) {
6451			/*
6452			 * We found an scb that needs to be acted on.
6453			 */
6454			found++;
6455			switch (action) {
6456			case SEARCH_REMOVE:
6457				next = ahc_rem_wscb(ahc, next, prev);
6458				break;
6459			case SEARCH_COMPLETE:
6460				next = ahc_rem_wscb(ahc, next, prev);
6461				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6462					ahcsetccbstatus(scbp->ccb, status);
6463				ahc_freeze_ccb(scbp->ccb);
6464				ahc_done(ahc, scbp);
6465				break;
6466			case SEARCH_COUNT:
6467				prev = next;
6468				next = ahc_inb(ahc, SCB_NEXT);
6469				break;
6470			}
6471		} else {
6472
6473			prev = next;
6474			next = ahc_inb(ahc, SCB_NEXT);
6475		}
6476	}
6477	ahc_outb(ahc, SCBPTR, curscbptr);
6478
6479	if (action == SEARCH_COMPLETE)
6480		ahc_release_untagged_queues(ahc);
6481	return (found);
6482}
6483
6484
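/*
 * Service an XPT abort request.  Target mode CCBs still waiting on the
 * accept_tios or immed_notifies lists are simply unlinked and returned;
 * aborting an active SCSI I/O is not yet fully implemented.
 */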
6485static void
6486ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6487{
6488	union ccb *abort_ccb;
6489
6490	abort_ccb = ccb->cab.abort_ccb;
6491	switch (abort_ccb->ccb_h.func_code) {
6492	case XPT_ACCEPT_TARGET_IO:
6493	case XPT_IMMED_NOTIFY:
6494	case XPT_CONT_TARGET_IO:
6495	{
6496		struct tmode_tstate *tstate;
6497		struct tmode_lstate *lstate;
6498		struct ccb_hdr_slist *list;
6499		cam_status status;
6500
6501		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
6502					     &lstate, TRUE);
6503
6504		if (status != CAM_REQ_CMP) {
6505			ccb->ccb_h.status = status;
6506			break;
6507		}
6508
6509		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
6510			list = &lstate->accept_tios;
6511		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
6512			list = &lstate->immed_notifies;
6513		else
6514			list = NULL;
6515
6516		if (list != NULL) {
6517			struct ccb_hdr *curelm;
6518			int found;
6519
6520			curelm = SLIST_FIRST(list);
6521			found = 0;
6522			if (curelm == &abort_ccb->ccb_h) {
6523				found = 1;
6524				SLIST_REMOVE_HEAD(list, sim_links.sle);
6525			} else {
6526				while(curelm != NULL) {
6527					struct ccb_hdr *nextelm;
6528
6529					nextelm =
6530					    SLIST_NEXT(curelm, sim_links.sle);
6531
6532					if (nextelm == &abort_ccb->ccb_h) {
6533						found = 1;
6534						SLIST_NEXT(curelm,
6535							   sim_links.sle) =
6536						    SLIST_NEXT(nextelm,
6537							       sim_links.sle);
6538						break;
6539					}
6540					curelm = nextelm;
6541				}
6542			}
6543
6544			if (found) {
6545				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
6546				xpt_done(abort_ccb);
6547				ccb->ccb_h.status = CAM_REQ_CMP;
6548			} else {
6549				printf("Not found\n");
6550				ccb->ccb_h.status = CAM_PATH_INVALID;
6551			}
6552			break;
6553		}
6554		/* FALLTHROUGH */
6555	}
6556	case XPT_SCSI_IO:
6557		/* XXX Fully implement the hard ones */
6558		ccb->ccb_h.status = CAM_UA_ABORT;
6559		break;
6560	default:
6561		ccb->ccb_h.status = CAM_REQ_INVALID;
6562		break;
6563	}
6564	xpt_done(ccb);
6565}
6566
6567/*
6568 * Abort all SCBs that match the given description (target/channel/lun/tag),
6569 * setting their status to the passed in status if the status has not already
6570 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
6571 * is paused before it is called.
6572 */
6573static int
6574ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
6575	       int lun, u_int tag, role_t role, uint32_t status)
6576{
6577	struct	scb *scbp;
6578	u_int	active_scb;
6579	int	i;
6580	int	maxtarget;
6581	int	found;
6582
6583	/*
6584	 * Don't attempt to run any queued untagged transactions
6585	 * until we are done with the abort process.
6586	 */
6587	ahc_freeze_untagged_queues(ahc);
6588
6589	/* restore this when we're done */
6590	active_scb = ahc_inb(ahc, SCBPTR);
6591
6592	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
6593				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
6594
6595	/*
6596	 * Clean out the busy target table for any untagged commands.
6597	 */
6598	i = 0;
6599	maxtarget = 16;
6600	if (target != CAM_TARGET_WILDCARD) {
6601		i = target;
6602		maxtarget = target + 1;
6603	}
6604
6605	for (;i < maxtarget; i++) {
6606		u_int scbid;
6607
6608		scbid = ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0),
6609					   /*unbusy*/FALSE);
6610		scbp = &ahc->scb_data->scbarray[scbid];
6611		if (scbid < ahc->scb_data->numscbs
6612		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
6613			u_int minlun;
6614			u_int maxlun;
6615
6616			if (lun == CAM_LUN_WILDCARD) {
6617
6618				/*
6619				 * Unless we are using an SCB based
6620				 * busy targets table, there is only
6621				 * one table entry for all luns of
6622				 * a target.
6623				 */
6624				minlun = 0;
6625				maxlun = 1;
6626				if ((ahc->flags & AHC_SCB_BTT) != 0)
6627					maxlun = AHC_NUM_LUNS;
6628			} else {
6629				minlun = lun;
6630				maxlun = lun + 1;
6631			}
6632			while (minlun < maxlun) {
6633				ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4,
6634						   minlun), /*unbusy*/TRUE);
6635				minlun++;
6636			}
6637		}
6638	}
6639
6640	/*
6641	 * Go through the disconnected list and remove any entries we
6642	 * have queued for completion, 0'ing their control byte too.
6643	 * We save the active SCB and restore it ourselves, so there
6644	 * is no reason for this search to restore it too.
6645	 */
6646	ahc_search_disc_list(ahc, target, channel, lun, tag,
6647			     /*stop_on_first*/FALSE, /*remove*/TRUE,
6648			     /*save_state*/FALSE);
6649
6650	/*
6651	 * Go through the hardware SCB array looking for commands that
6652	 * were active but not on any list.
6653	 */
6654	for(i = 0; i < ahc->scb_data->maxhscbs; i++) {
6655		u_int scbid;
6656
6657		ahc_outb(ahc, SCBPTR, i);
6658		scbid = ahc_inb(ahc, SCB_TAG);
6659		scbp = &ahc->scb_data->scbarray[scbid];
6660		if (scbid < ahc->scb_data->numscbs
6661		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
6662			ahc_add_curscb_to_free_list(ahc);
6663	}
6664
6665	/*
6666	 * Go through the pending CCB list and look for
6667	 * commands for this target that are still active.
6668	 * These are other tagged commands that were
6669	 * disconnected when the reset occurred.
6670	 */
6671	{
6672		struct ccb_hdr *ccb_h;
6673
6674		ccb_h = ahc->pending_ccbs.lh_first;
6675		while (ccb_h != NULL) {
6676			scbp = (struct scb *)ccb_h->ccb_scb_ptr;
6677			ccb_h = ccb_h->sim_links.le.le_next;
6678			if (ahc_match_scb(ahc, scbp, target, channel,
6679					  lun, tag, role)) {
6680				if (ahc_ccb_status(scbp->ccb) == CAM_REQ_INPROG)
6681					ahcsetccbstatus(scbp->ccb, status);
6682				ahc_freeze_ccb(scbp->ccb);
6683				ahc_done(ahc, scbp);
6684				found++;
6685			}
6686		}
6687	}
6688	ahc_outb(ahc, SCBPTR, active_scb);
6689	ahc_release_untagged_queues(ahc);
6690	return found;
6691}
6692
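/*
 * Walk the sequencer's disconnected SCB list looking for entries that
 * match the given target/channel/lun/tag.  Matching entries are counted
 * and optionally removed from the list.  Returns the number of matches.
 */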
6693static int
6694ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
6695		     int lun, u_int tag, int stop_on_first, int remove,
6696		     int save_state)
6697{
6698	struct	scb *scbp;
6699	u_int	next;
6700	u_int	prev;
6701	u_int	count;
6702	u_int	active_scb;
6703
6704	count = 0;
6705	next = ahc_inb(ahc, DISCONNECTED_SCBH);
6706	prev = SCB_LIST_NULL;
6707
6708	if (save_state) {
6709		/* restore this when we're done */
6710		active_scb = ahc_inb(ahc, SCBPTR);
6711	} else
6712		/* Silence compiler */
6713		active_scb = SCB_LIST_NULL;
6714
6715	while (next != SCB_LIST_NULL) {
6716		u_int scb_index;
6717
6718		ahc_outb(ahc, SCBPTR, next);
6719		scb_index = ahc_inb(ahc, SCB_TAG);
6720		if (scb_index >= ahc->scb_data->numscbs) {
6721			panic("Disconnected List inconsistency. "
6722			      "SCB index == %d, yet numscbs == %d.",
6723			      scb_index, ahc->scb_data->numscbs);
6724		}
6725
6726		if (next == prev) {
6727			panic("Disconnected List Loop. "
6728			      "cur SCBPTR == %x, prev SCBPTR == %x.",
6729			      next, prev);
6730		}
6731		scbp = &ahc->scb_data->scbarray[scb_index];
6732		if (ahc_match_scb(ahc, scbp, target, channel, lun,
6733				  tag, ROLE_INITIATOR)) {
6734			count++;
6735			if (remove) {
6736				next =
6737				    ahc_rem_scb_from_disc_list(ahc, prev, next);
6738			} else {
6739				prev = next;
6740				next = ahc_inb(ahc, SCB_NEXT);
6741			}
6742			if (stop_on_first)
6743				break;
6744		} else {
6745			prev = next;
6746			next = ahc_inb(ahc, SCB_NEXT);
6747		}
6748	}
6749	if (save_state)
6750		ahc_outb(ahc, SCBPTR, active_scb);
6751	return (count);
6752}
6753
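/*
 * Unlink the SCB at scbptr from the disconnected list, return it to the
 * sequencer's free list, and return the index of the SCB that followed
 * it on the list.
 */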
6754static u_int
6755ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
6756{
6757	u_int next;
6758
6759	ahc_outb(ahc, SCBPTR, scbptr);
6760	next = ahc_inb(ahc, SCB_NEXT);
6761
6762	ahc_outb(ahc, SCB_CONTROL, 0);
6763
6764	ahc_add_curscb_to_free_list(ahc);
6765
6766	if (prev != SCB_LIST_NULL) {
6767		ahc_outb(ahc, SCBPTR, prev);
6768		ahc_outb(ahc, SCB_NEXT, next);
6769	} else
6770		ahc_outb(ahc, DISCONNECTED_SCBH, next);
6771
6772	return (next);
6773}
6774
6775static void
6776ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
6777{
6778	/* Invalidate the tag so that ahc_find_scb doesn't think it's active */
6779	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
6780
6781	ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
6782	ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
6783}
6784
6785/*
6786 * Manipulate the waiting for selection list and return the
6787 * scb that follows the one that we remove.
6788 */
6789static u_int
6790ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
6791{
6792	u_int curscb, next;
6793
6794	/*
6795	 * Select the SCB we want to abort and
6796	 * pull the next pointer out of it.
6797	 */
6798	curscb = ahc_inb(ahc, SCBPTR);
6799	ahc_outb(ahc, SCBPTR, scbpos);
6800	next = ahc_inb(ahc, SCB_NEXT);
6801
6802	/* Clear the necessary fields */
6803	ahc_outb(ahc, SCB_CONTROL, 0);
6804
6805	ahc_add_curscb_to_free_list(ahc);
6806
6807	/* update the waiting list */
6808	if (prev == SCB_LIST_NULL) {
6809		/* First in the list */
6810		ahc_outb(ahc, WAITING_SCBH, next);
6811
6812		/*
6813		 * Ensure we aren't attempting to perform
6814		 * selection for this entry.
6815		 */
6816		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
6817	} else {
6818		/*
6819		 * Select the scb that pointed to us
6820		 * and update its next pointer.
6821		 */
6822		ahc_outb(ahc, SCBPTR, prev);
6823		ahc_outb(ahc, SCB_NEXT, next);
6824	}
6825
6826	/*
6827	 * Point us back at the original scb position.
6828	 */
6829	ahc_outb(ahc, SCBPTR, curscb);
6830	return next;
6831}
6832
6833static void
6834ahc_clear_intstat(struct ahc_softc *ahc)
6835{
6836	/* Clear any interrupt conditions this may have caused */
6837	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
6838	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
6839				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
6840				CLRREQINIT);
6841	ahc_outb(ahc, CLRINT, CLRSCSIINT);
6842}
6843
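/*
 * Assert SCSI reset on the currently selected bus, with the reset
 * interrupt masked so that we do not react to our own reset, and clear
 * any interrupt conditions the reset produced.
 */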
6844static void
6845ahc_reset_current_bus(struct ahc_softc *ahc)
6846{
6847	uint8_t scsiseq;
6848
6849	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
6850	scsiseq = ahc_inb(ahc, SCSISEQ);
6851	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
6852	DELAY(AHC_BUSRESET_DELAY);
6853	/* Turn off the bus reset */
6854	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
6855
6856	ahc_clear_intstat(ahc);
6857
6858	/* Re-enable reset interrupts */
6859	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
6860}
6861
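/*
 * Reset the given channel, optionally asserting the bus reset
 * ourselves.  All transactions pending on the channel are aborted,
 * target mode peripherals and the XPT are notified, and every target
 * on the channel reverts to async/narrow transfers.  Returns the
 * number of SCBs aborted.
 */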
6862static int
6863ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
6864{
6865	struct	cam_path *path;
6866	u_int	initiator, target, max_scsiid;
6867	u_int	sblkctl;
6868	u_int	our_id;
6869	int	found;
6870	int	restart_needed;
6871	char	cur_channel;
6872
6873	ahc->pending_device = NULL;
6874
6875	pause_sequencer(ahc);
6876
6877	/*
6878	 * Run our command complete fifos to ensure that we perform
6879	 * completion processing on any commands that 'completed'
6880	 * before the reset occurred.
6881	 */
6882	ahc_run_qoutfifo(ahc);
6883	if ((ahc->flags & AHC_TARGETMODE) != 0) {
6884		ahc_run_tqinfifo(ahc, /*paused*/TRUE);
6885	}
6886
6887	/*
6888	 * Reset the bus if we are initiating this reset
6889	 */
6890	sblkctl = ahc_inb(ahc, SBLKCTL);
6891	cur_channel = 'A';
6892	if ((ahc->features & AHC_TWIN) != 0
6893	 && ((sblkctl & SELBUSB) != 0))
6894	    cur_channel = 'B';
6895	if (cur_channel != channel) {
6896		/* Case 1: Command for another bus is active
6897		 * Stealthily reset the other bus without
6898		 * upsetting the current bus.
6899		 */
6900		ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
6901		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
6902		ahc_outb(ahc, SCSISEQ,
6903			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
6904		if (initiate_reset)
6905			ahc_reset_current_bus(ahc);
6906		ahc_clear_intstat(ahc);
6907		ahc_outb(ahc, SBLKCTL, sblkctl);
6908		restart_needed = FALSE;
6909	} else {
6910		/* Case 2: A command from this bus is active or we're idle */
6911		ahc_clear_msg_state(ahc);
6912		ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
6913		ahc_outb(ahc, SCSISEQ,
6914			 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
6915		if (initiate_reset)
6916			ahc_reset_current_bus(ahc);
6917		ahc_clear_intstat(ahc);
6918
6919		/*
6920		 * Since we are going to restart the sequencer, avoid
6921		 * a race in the sequencer that could cause corruption
6922		 * of our Q pointers by starting over from index 1.
6923		 */
6924		ahc->qoutfifonext = 0;
6925		if ((ahc->features & AHC_QUEUE_REGS) != 0)
6926			ahc_outb(ahc, SDSCB_QOFF, 0);
6927		else
6928			ahc_outb(ahc, QOUTPOS, 0);
6929		if ((ahc->flags & AHC_TARGETMODE) != 0) {
6930			ahc->tqinfifonext = 1;
6931			ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
6932			ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
6933			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
6934				u_int hs_mailbox;
6935
6936				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
6937				hs_mailbox &= ~HOST_TQINPOS;
6938				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
6939			}
6940		}
6941		restart_needed = TRUE;
6942	}
6943
6944	/*
6945	 * Clean up all the state information for the
6946	 * pending transactions on this bus.
6947	 */
6948	found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6949			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
6950			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6951	if (channel == 'B') {
6952		path = ahc->path_b;
6953		our_id = ahc->our_id_b;
6954	} else {
6955		path = ahc->path;
6956		our_id = ahc->our_id;
6957	}
6958
6959	max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6960
6961	/*
6962	 * Send an immediate notify ccb to all target mode peripheral
6963	 * drivers affected by this action.
6964	 */
6965	for (target = 0; target <= max_scsiid; target++) {
6966		struct tmode_tstate* tstate;
6967		u_int lun;
6968
6969		tstate = ahc->enabled_targets[target];
6970		if (tstate == NULL)
6971			continue;
6972		for (lun = 0; lun <= 7; lun++) {
6973			struct tmode_lstate* lstate;
6974
6975			lstate = tstate->enabled_luns[lun];
6976			if (lstate == NULL)
6977				continue;
6978
6979			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6980					       EVENT_TYPE_BUS_RESET, /*arg*/0);
6981			ahc_send_lstate_events(ahc, lstate);
6982		}
6983	}
6984
6985	/* Notify the XPT that a bus reset occurred */
6986	xpt_async(AC_BUS_RESET, path, NULL);
6987
6988	/*
6989	 * Revert to async/narrow transfers until we renegotiate.
6990	 */
6991	for (target = 0; target <= max_scsiid; target++) {
6992
6993		if (ahc->enabled_targets[target] == NULL)
6994			continue;
6995		for (initiator = 0; initiator <= max_scsiid; initiator++) {
6996			struct ahc_devinfo devinfo;
6997
6998			ahc_compile_devinfo(&devinfo, target, initiator,
6999					    CAM_LUN_WILDCARD,
7000					    channel, ROLE_UNKNOWN);
7001			ahc_set_width(ahc, &devinfo, path,
7002				      MSG_EXT_WDTR_BUS_8_BIT,
7003				      AHC_TRANS_CUR, /*paused*/TRUE);
7004			ahc_set_syncrate(ahc, &devinfo, path,
7005					 /*syncrate*/NULL, /*period*/0,
7006					 /*offset*/0, /*ppr_options*/0,
7007					 AHC_TRANS_CUR, /*paused*/TRUE);
7008		}
7009	}
7010
7011	if (restart_needed)
7012		restart_sequencer(ahc);
7013	else
7014		unpause_sequencer(ahc);
7015	return found;
7016}
7017
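/*
 * Return non-zero if the SCB matches the given target, channel, lun,
 * tag, and role.  ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
 * and SCB_LIST_NULL act as wildcards for their respective fields.
 */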
7018static int
7019ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
7020	      char channel, int lun, u_int tag, role_t role)
7021{
7022	int targ = SCB_GET_TARGET(ahc, scb);
7023	char chan = SCB_GET_CHANNEL(ahc, scb);
7024	int slun = SCB_GET_LUN(scb);
7025	int match;
7026
7027	match = ((chan == channel) || (channel == ALL_CHANNELS));
7028	if (match != 0)
7029		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
7030	if (match != 0)
7031		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
7032	if (match != 0) {
7033		int group;
7034
7035		group = XPT_FC_GROUP(scb->ccb->ccb_h.func_code);
7036		if (role == ROLE_INITIATOR) {
7037			match = (group == XPT_FC_GROUP_COMMON)
7038			      && ((tag == scb->hscb->tag)
7039			       || (tag == SCB_LIST_NULL));
7040		} else if (role == ROLE_TARGET) {
7041			match = (group == XPT_FC_GROUP_TMODE)
7042			      && ((tag == scb->ccb->csio.tag_id)
7043			       || (tag == SCB_LIST_NULL));
7044		}
7045	}
7046
7047	return match;
7048}
7049
7050static void
7051ahc_construct_sdtr(struct ahc_softc *ahc, u_int period, u_int offset)
7052{
7053	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7054	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
7055	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
7056	ahc->msgout_buf[ahc->msgout_index++] = period;
7057	ahc->msgout_buf[ahc->msgout_index++] = offset;
7058	ahc->msgout_len += 5;
7059}
7060
7061static void
7062ahc_construct_wdtr(struct ahc_softc *ahc, u_int bus_width)
7063{
7064	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7065	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
7066	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
7067	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
7068	ahc->msgout_len += 4;
7069}
7070
7071static void
7072ahc_construct_ppr(struct ahc_softc *ahc, u_int period, u_int offset,
7073		  u_int bus_width, u_int ppr_options)
7074{
7075	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
7076	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
7077	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
7078	ahc->msgout_buf[ahc->msgout_index++] = period;
7079	ahc->msgout_buf[ahc->msgout_index++] = 0;
7080	ahc->msgout_buf[ahc->msgout_index++] = offset;
7081	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
7082	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
7083	ahc->msgout_len += 8;
7084}
7085
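/*
 * Compute the data (or sense) residual for a completed SCB from the
 * residual S/G pointer and byte count that the sequencer left in the
 * status packet.
 */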
7086static void
7087ahc_calc_residual(struct scb *scb)
7088{
7089	struct hardware_scb *hscb;
7090	struct status_pkt *spkt;
7091	uint32_t resid;
7092
7093	/*
7094	 * 5 cases.
7095	 * 1) No residual.
7096	 *    SG_RESID_VALID clear in sgptr.
7097	 * 2) Transferless command
7098	 * 3) Never performed any transfers.
7099	 *    sgptr has SG_FULL_RESID set.
7100	 * 4) No residual but target did not
7101	 *    save data pointers after the
7102	 *    last transfer, so sgptr was
7103	 *    never updated.
7104	 * 5) We have a partial residual.
7105	 *    Use residual_sgptr to determine
7106	 *    where we are.
7107	 */
7108
7109	hscb = scb->hscb;
7110	if ((hscb->sgptr & SG_RESID_VALID) == 0)
7111		/* Case 1 */
7112		return;
7113	hscb->sgptr &= ~SG_RESID_VALID;
7114
7115	if ((hscb->sgptr & SG_LIST_NULL) != 0)
7116		/* Case 2 */
7117		return;
7118
7119	spkt = &hscb->shared_data.status;
7120	if ((hscb->sgptr & SG_FULL_RESID) != 0)
7121		/* Case 3 */
7122		resid = scb->ccb->csio.dxfer_len;
7123	else if ((spkt->residual_sg_ptr & SG_LIST_NULL) != 0)
7124		/* Case 4 */
7125		return;
7126	else if ((spkt->residual_sg_ptr & ~SG_PTR_MASK) != 0)
7127		panic("Bogus resid sgptr value 0x%x\n", spkt->residual_sg_ptr);
7128	else {
7129		struct ahc_dma_seg *sg;
7130
7131		/*
7132		 * Remainder of the SG where the transfer
7133		 * stopped.
7134		 */
7135		resid = spkt->residual_datacnt & AHC_SG_LEN_MASK;
7136		sg = ahc_sg_bus_to_virt(scb,
7137					spkt->residual_sg_ptr & SG_PTR_MASK);
7138
7139		/* The residual sg_ptr always points to the next sg */
7140		sg--;
7141
7142		/*
7143		 * Add up the contents of all residual
7144		 * SG segments that are after the SG where
7145		 * the transfer stopped.
7146		 */
7147		while ((sg->len & AHC_DMA_LAST_SEG) == 0) {
7148			sg++;
7149			resid += sg->len & AHC_SG_LEN_MASK;
7150		}
7151	}
7152	if ((scb->flags & SCB_SENSE) == 0) {
7153
7154		scb->ccb->csio.resid = resid;
7155	} else {
7156
7157		scb->ccb->csio.sense_resid = resid;
7158	}
7159
7160#ifdef AHC_DEBUG
7161	if (ahc_debug & AHC_SHOWMISC) {
7162		xpt_print_path(scb->ccb->ccb_h.path);
7163		printf("Handled Residual of %d bytes\n", resid);
7164	}
7165#endif
7166}
7167
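/*
 * Propagate newly negotiated transfer settings into every pending
 * hardware SCB, both the copies in host memory and those cached in
 * SCB RAM on the controller.
 */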
7168static void
7169ahc_update_pending_syncrates(struct ahc_softc *ahc)
7170{
7171	struct	ccb_hdr *ccbh;
7172	int	pending_ccb_count;
7173	int	i;
7174	u_int	saved_scbptr;
7175
7176	/*
7177	 * Traverse the pending SCB list and ensure that all of the
7178	 * SCBs there have the proper settings.
7179	 */
7180	ccbh = LIST_FIRST(&ahc->pending_ccbs);
7181	pending_ccb_count = 0;
7182	while (ccbh != NULL) {
7183		struct ahc_devinfo devinfo;
7184		union  ccb *ccb;
7185		struct scb *pending_scb;
7186		struct hardware_scb *pending_hscb;
7187		struct ahc_initiator_tinfo *tinfo;
7188		struct tmode_tstate *tstate;
7189		u_int  our_id, remote_id;
7190
7191		ccb = (union ccb*)ccbh;
7192		pending_scb = (struct scb *)ccbh->ccb_scb_ptr;
7193		pending_hscb = pending_scb->hscb;
7194		if (ccbh->func_code == XPT_CONT_TARGET_IO) {
7195			our_id = ccb->ccb_h.target_id;
7196			remote_id = ccb->ctio.init_id;
7197		} else {
7198			our_id = SCB_IS_SCSIBUS_B(ahc, pending_scb)
7199			       ? ahc->our_id_b : ahc->our_id;
7200			remote_id = ccb->ccb_h.target_id;
7201		}
7202		ahc_compile_devinfo(&devinfo, our_id, remote_id,
7203				    SCB_GET_LUN(pending_scb),
7204				    SCB_GET_CHANNEL(ahc, pending_scb),
7205				    ROLE_UNKNOWN);
7206		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
7207					    our_id, remote_id, &tstate);
7208		pending_hscb->control &= ~ULTRAENB;
7209		if ((tstate->ultraenb & devinfo.target_mask) != 0)
7210			pending_hscb->control |= ULTRAENB;
7211		pending_hscb->scsirate = tinfo->scsirate;
7212		pending_hscb->scsioffset = tinfo->current.offset;
7213		pending_ccb_count++;
7214		ccbh = LIST_NEXT(ccbh, sim_links.le);
7215	}
7216
7217	if (pending_ccb_count == 0)
7218		return;
7219
7220	saved_scbptr = ahc_inb(ahc, SCBPTR);
7221	/* Ensure that the hscbs down on the card match the new information */
7222	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
7223		u_int scb_tag;
7224
7225		ahc_outb(ahc, SCBPTR, i);
7226		scb_tag = ahc_inb(ahc, SCB_TAG);
7227		if (scb_tag != SCB_LIST_NULL) {
7228			struct	ahc_devinfo devinfo;
7229			union  ccb *ccb;
7230			struct	scb *pending_scb;
7231			struct	hardware_scb *pending_hscb;
7232			struct	ahc_initiator_tinfo *tinfo;
7233			struct	tmode_tstate *tstate;
7234			u_int	our_id, remote_id;
7235			u_int	control;
7236
7237			pending_scb = &ahc->scb_data->scbarray[scb_tag];
7238			if (pending_scb->flags == SCB_FREE)
7239				continue;
7240			pending_hscb = pending_scb->hscb;
7241			ccb = pending_scb->ccb;
7242			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
7243				our_id = ccb->ccb_h.target_id;
7244				remote_id = ccb->ctio.init_id;
7245			} else {
7246				our_id = SCB_IS_SCSIBUS_B(ahc, pending_scb)
7247				       ? ahc->our_id_b : ahc->our_id;
7248				remote_id = ccb->ccb_h.target_id;
7249			}
7250			ahc_compile_devinfo(&devinfo, our_id, remote_id,
7251					    SCB_GET_LUN(pending_scb),
7252					    SCB_GET_CHANNEL(ahc, pending_scb),
7253					    ROLE_UNKNOWN);
7254			tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
7255						    our_id, remote_id, &tstate);
7256			control = ahc_inb(ahc, SCB_CONTROL);
7257			control &= ~ULTRAENB;
7258			if ((tstate->ultraenb & devinfo.target_mask) != 0)
7259				control |= ULTRAENB;
7260			ahc_outb(ahc, SCB_CONTROL, control);
7261			ahc_outb(ahc, SCB_SCSIRATE, tinfo->scsirate);
7262			ahc_outb(ahc, SCB_SCSIOFFSET, tinfo->current.offset);
7263		}
7264	}
7265	ahc_outb(ahc, SCBPTR, saved_scbptr);
7266}
7267
7268#if UNUSED
7269static void
7270ahc_dump_targcmd(struct target_cmd *cmd)
7271{
7272	uint8_t *byte;
7273	uint8_t *last_byte;
7274	int i;
7275
7276	byte = &cmd->initiator_channel;
7277	/* Debugging info for received commands */
7278	last_byte = &cmd[1].initiator_channel;
7279
7280	i = 0;
7281	while (byte < last_byte) {
7282		if (i == 0)
7283			printf("\t");
7284		printf("%#x", *byte++);
7285		i++;
7286		if (i == 8) {
7287			printf("\n");
7288			i = 0;
7289		} else {
7290			printf(", ");
7291		}
7292	}
7293}
7294#endif
7295
7296static void
7297ahc_shutdown(void *arg, int howto)
7298{
7299	struct	ahc_softc *ahc;
7300	int	i;
7301	u_int	sxfrctl1_a, sxfrctl1_b;
7302
7303	ahc = (struct ahc_softc *)arg;
7304
7305	pause_sequencer(ahc);
7306
7307	/*
7308	 * Preserve the value of the SXFRCTL1 register for all channels.
7309	 * It contains settings that affect termination and we don't want
7310	 * to disturb the integrity of the bus during shutdown in case
7311	 * we are in a multi-initiator setup.
7312	 */
7313	sxfrctl1_b = 0;
7314	if ((ahc->features & AHC_TWIN) != 0) {
7315		u_int sblkctl;
7316
7317		sblkctl = ahc_inb(ahc, SBLKCTL);
7318		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
7319		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
7320		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
7321	}
7322
7323	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
7324
7325	/* This will reset most registers to 0, but not all */
7326	ahc_reset(ahc);
7327
7328	if ((ahc->features & AHC_TWIN) != 0) {
7329		u_int sblkctl;
7330
7331		sblkctl = ahc_inb(ahc, SBLKCTL);
7332		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
7333		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
7334		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
7335	}
7336	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
7337
7338	ahc_outb(ahc, SCSISEQ, 0);
7339	ahc_outb(ahc, SXFRCTL0, 0);
7340	ahc_outb(ahc, DSPCISTATUS, 0);
7341
7342	for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++)
7343		ahc_outb(ahc, i, 0);
7344}
7345
7346/*
7347 * Add a target mode event to this lun's queue
7348 */
7349static void
7350ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
7351		       u_int initiator_id, u_int event_type, u_int event_arg)
7352{
7353	struct ahc_tmode_event *event;
7354	int pending;
7355
7356	xpt_freeze_devq(lstate->path, /*count*/1);
7357	if (lstate->event_w_idx >= lstate->event_r_idx)
7358		pending = lstate->event_w_idx - lstate->event_r_idx;
7359	else
7360		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
7361			- (lstate->event_r_idx - lstate->event_w_idx);
7362
7363	if (event_type == EVENT_TYPE_BUS_RESET
7364	 || event_type == MSG_BUS_DEV_RESET) {
7365		/*
7366		 * Any earlier events are irrelevant, so reset our buffer.
7367		 * This has the effect of allowing us to deal with reset
7368		 * floods (an external device holding down the reset line)
7369		 * without losing the event that is really interesting.
7370		 */
7371		lstate->event_r_idx = 0;
7372		lstate->event_w_idx = 0;
7373		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
7374	}
7375
7376	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
7377		xpt_print_path(lstate->path);
7378		printf("immediate event %x:%x lost\n",
7379		       lstate->event_buffer[lstate->event_r_idx].event_type,
7380		       lstate->event_buffer[lstate->event_r_idx].event_arg);
7381		lstate->event_r_idx++;
7382		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7383			lstate->event_r_idx = 0;
7384		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
7385	}
7386
7387	event = &lstate->event_buffer[lstate->event_w_idx];
7388	event->initiator_id = initiator_id;
7389	event->event_type = event_type;
7390	event->event_arg = event_arg;
7391	lstate->event_w_idx++;
7392	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7393		lstate->event_w_idx = 0;
7394}
7395
7396/*
7397 * Send any target mode events queued up waiting
7398 * for immediate notify resources.
7399 */
7400static void
7401ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
7402{
7403	struct ccb_hdr *ccbh;
7404	struct ccb_immed_notify *inot;
7405
7406	while (lstate->event_r_idx != lstate->event_w_idx
7407	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
7408		struct ahc_tmode_event *event;
7409
7410		event = &lstate->event_buffer[lstate->event_r_idx];
7411		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
7412		inot = (struct ccb_immed_notify *)ccbh;
7413		switch (event->event_type) {
7414		case EVENT_TYPE_BUS_RESET:
7415			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
7416			break;
7417		default:
7418			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
7419			inot->message_args[0] = event->event_type;
7420			inot->message_args[1] = event->event_arg;
7421			break;
7422		}
7423		inot->initiator_id = event->initiator_id;
7424		inot->sense_len = 0;
7425		xpt_done((union ccb *)inot);
7426		lstate->event_r_idx++;
7427		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
7428			lstate->event_r_idx = 0;
7429	}
7430}
7431