1/*-
2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
4 * All rights reserved.
5 *
6 * TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
11 *
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
22 *
23 * SCSI I2O host adapter driver
24 *
25 *	V1.10 2004/05/05 scottl@freebsd.org
26 *		- Massive cleanup of the driver to remove dead code and
27 *		  non-conformant style.
28 *		- Removed most i386-specific code to make it more portable.
29 *		- Converted to the bus_space API.
30 *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 *		- The 2000S and 2005S do not initialize on some machines,
32 *		  increased timeout to 255ms from 50ms for the StatusGet
33 *		  command.
34 *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 *		- I knew this one was too good to be true. The error return
36 *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 *		  to the bit masked status.
38 *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 *		- The 2005S that was supported is affectionately called the
40 *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41 *		  16MB low-cost configuration, Firmware was forced to go
42 *		  to a Split BAR Firmware. This requires a separate IOP and
43 *		  Messaging base address.
44 *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 *		- Handle support for 2005S Zero Channel RAID solution.
46 *		- System locked up if the Adapter locked up. Do not try
47 *		  to send other commands if the resetIOP command fails. The
48 *		  fail outstanding command discovery loop was flawed as the
49 *		  removal of the command from the list prevented discovering
50 *		  all the commands.
51 *		- Comment changes to clarify driver.
52 *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 *		- We do not use the AC_FOUND_DEV event because of I2O.
54 *		  Removed asr_async.
55 *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 *		  mode as this is confused with competitor adapters in run
59 *		  mode.
60 *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 *		  to prevent operating system panic.
62 *		- moved default major number to 154 from 97.
63 *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65 *		  series that is visible, it's more of an internal code name.
66 *		  remove any visible references within reason for now.
67 *		- bus_ptr->LUN was not correctly zeroed when initially
68 *		  allocated causing a possible panic of the operating system
69 *		  during boot.
70 *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 *		- Code always fails for ASR_getTid affecting performance.
72 *		- initiated a set of changes that resulted from a formal
73 *		  code inspection by Mark_Salyzyn@adaptec.com,
74 *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 *		  Their findings were focussed on the LCT & TID handler, and
77 *		  all resulting changes were to improve code readability,
78 *		  consistency or have a positive effect on performance.
79 *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 *		- Passthrough returned an incorrect error.
81 *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82 *		  on command completion.
83 *		- generate control device nodes using make_dev and delete_dev.
84 *		- Performance affected by TID caching reallocing.
85 *		- Made suggested changes by Justin_Gibbs@adaptec.com
86 *			- use splcam instead of splbio.
87 *			- use cam_imask instead of bio_imask.
88 *			- use u_int8_t instead of u_char.
89 *			- use u_int16_t instead of u_short.
90 *			- use u_int32_t instead of u_long where appropriate.
91 *			- use 64 bit context handler instead of 32 bit.
92 *			- create_ccb should only allocate the worst case
93 *			  requirements for the driver since CAM may evolve
94 *			  making union ccb much larger than needed here.
95 *			  renamed create_ccb to asr_alloc_ccb.
96 *			- go nutz justifying all debug prints as macros
97 *			  defined at the top and remove unsightly ifdefs.
98 *			- INLINE STATIC viewed as confusing. Historically
99 *			  utilized to affect code performance and debug
100 *			  issues in OS, Compiler or OEM specific situations.
101 *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103 *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104 *			changed variable name xs to ccb
105 *			changed struct scsi_link to struct cam_path
106 *			changed struct scsibus_data to struct cam_sim
107 *			stopped using fordriver for holding on to the TID
108 *			use proprietary packet creation instead of scsi_inquire
109 *			CAM layer sends synchronize commands.
110 */
111
112#include <sys/cdefs.h>
113#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
114#include <sys/kernel.h>
115#include <sys/module.h>
116#include <sys/systm.h>
117#include <sys/malloc.h>
118#include <sys/conf.h>
119#include <sys/ioccom.h>
120#include <sys/priv.h>
121#include <sys/proc.h>
122#include <sys/bus.h>
123#include <machine/resource.h>
124#include <machine/bus.h>
125#include <sys/rman.h>
126#include <sys/stat.h>
127#include <sys/bus_dma.h>
128
129#include <cam/cam.h>
130#include <cam/cam_ccb.h>
131#include <cam/cam_sim.h>
132#include <cam/cam_xpt_sim.h>
133
134#include <cam/scsi/scsi_all.h>
135#include <cam/scsi/scsi_message.h>
136
137#include <vm/vm.h>
138#include <vm/pmap.h>
139
140#if defined(__i386__)
141#include "opt_asr.h"
142#include <i386/include/cputypes.h>
143
144#if defined(ASR_COMPAT)
145#define ASR_IOCTL_COMPAT
146#endif /* ASR_COMPAT */
147#endif
148#include <machine/vmparam.h>
149
150#include <dev/pci/pcivar.h>
151#include <dev/pci/pcireg.h>
152
153#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
154#define	KVTOPHYS(x) vtophys(x)
155#include	<dev/asr/dptalign.h>
156#include	<dev/asr/i2oexec.h>
157#include	<dev/asr/i2obscsi.h>
158#include	<dev/asr/i2odpt.h>
159#include	<dev/asr/i2oadptr.h>
160
161#include	<dev/asr/sys_info.h>
162
163__FBSDID("$FreeBSD$");
164
165#define	ASR_VERSION	1
166#define	ASR_REVISION	'1'
167#define	ASR_SUBREVISION '0'
168#define	ASR_MONTH	5
169#define	ASR_DAY		5
170#define	ASR_YEAR	(2004 - 1980)
171
172/*
173 *	Debug macros to reduce the unsightly ifdefs
174 */
175#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
176static __inline void
177debug_asr_message(PI2O_MESSAGE_FRAME message)
178{
179	u_int32_t * pointer = (u_int32_t *)message;
180	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
181	u_int32_t   counter = 0;
182
183	while (length--) {
184		printf("%08lx%c", (u_long)*(pointer++),
185		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
186	}
187}
188#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
189
190#ifdef DEBUG_ASR
  /* Breaks on non-STDC-based compilers :-( */
192#define debug_asr_printf(fmt,args...)	printf(fmt, ##args)
193#define debug_asr_dump_message(message)	debug_asr_message(message)
194#define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
195#else /* DEBUG_ASR */
196#define debug_asr_printf(fmt,args...)
197#define debug_asr_dump_message(message)
198#define debug_asr_print_path(ccb)
199#endif /* DEBUG_ASR */
200
201/*
202 *	If DEBUG_ASR_CMD is defined:
203 *		0 - Display incoming SCSI commands
204 *		1 - add in a quick character before queueing.
205 *		2 - add in outgoing message frames.
206 */
207#if (defined(DEBUG_ASR_CMD))
208#define debug_asr_cmd_printf(fmt,args...)     printf(fmt,##args)
209static __inline void
210debug_asr_dump_ccb(union ccb *ccb)
211{
212	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
213	int		len = ccb->csio.cdb_len;
214
215	while (len) {
216		debug_asr_cmd_printf (" %02x", *(cp++));
217		--len;
218	}
219}
220#if (DEBUG_ASR_CMD > 0)
221#define debug_asr_cmd1_printf		       debug_asr_cmd_printf
222#else
223#define debug_asr_cmd1_printf(fmt,args...)
224#endif
225#if (DEBUG_ASR_CMD > 1)
226#define debug_asr_cmd2_printf			debug_asr_cmd_printf
227#define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
228#else
229#define debug_asr_cmd2_printf(fmt,args...)
230#define debug_asr_cmd2_dump_message(message)
231#endif
232#else /* DEBUG_ASR_CMD */
233#define debug_asr_cmd_printf(fmt,args...)
234#define debug_asr_dump_ccb(ccb)
235#define debug_asr_cmd1_printf(fmt,args...)
236#define debug_asr_cmd2_printf(fmt,args...)
237#define debug_asr_cmd2_dump_message(message)
238#endif /* DEBUG_ASR_CMD */
239
240#if (defined(DEBUG_ASR_USR_CMD))
241#define debug_usr_cmd_printf(fmt,args...)   printf(fmt,##args)
242#define debug_usr_cmd_dump_message(message) debug_usr_message(message)
243#else /* DEBUG_ASR_USR_CMD */
244#define debug_usr_cmd_printf(fmt,args...)
245#define debug_usr_cmd_dump_message(message)
246#endif /* DEBUG_ASR_USR_CMD */
247
248#ifdef ASR_IOCTL_COMPAT
249#define	dsDescription_size 46	/* Snug as a bug in a rug */
250#endif /* ASR_IOCTL_COMPAT */
251
252#include "dev/asr/dptsig.h"
253
/*
 *	DPT driver signature block (layout from dev/asr/dptsig.h) holding
 *	driver type, version and capability flags.
 *	NOTE(review): presumably exported to DPT/Adaptec management tools
 *	via the ioctl interface -- confirm against asr_ioctl.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
264
265/* Configuration Definitions */
266
267#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
268#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
269#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
270#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
271#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
272#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
273#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
274#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
275				/* Also serves as the minimum map for	 */
276				/* the 2005S zero channel RAID product	 */
277
278/* I2O register set */
279#define	I2O_REG_STATUS		0x30
280#define	I2O_REG_MASK		0x34
281#define	I2O_REG_TOFIFO		0x40
282#define	I2O_REG_FROMFIFO	0x44
283
284#define	Mask_InterruptsDisabled	0x08
285
286/*
287 * A MIX of performance and space considerations for TID lookups
288 */
/* I2O Target ID, 16 bits as a space/performance compromise (see above) */
typedef u_int16_t tid_t;

/*
 * Per-target LUN -> TID map.  TID[] is declared [1] but over-allocated by
 * ASR_getTidAddress to hold `size' entries (pre-C99 flexible array idiom).
 */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;

/* Per-bus target -> lun2tid_t map; LUN[] likewise grows with `size'. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
300
301/*
302 *	To ensure that we only allocate and use the worst case ccb here, lets
303 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
304 *	ccb type, ensure that you add the additional structures into our local
305 *	ccb union. To ensure strict type checking, we will utilize the local
306 *	ccb definition wherever possible.
307 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;   /* SCSI I/O requests */
	struct ccb_setasync csa;    /* async callback registration */
};
313
/*
 *	DMA-visible status area: `status' receives EXEC_STATUS_GET replies
 *	(see ASR_getStatus) and `rstatus' the IOP reset status word
 *	(see ASR_resetIOP).
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;
	U32				rstatus;
};
318
319/**************************************************************************
320** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321**  Is Configured Into The System.  The Structure Supplies Configuration **
322**  Information, Status Info, Queue Info And An Active CCB List Pointer. **
323***************************************************************************/
324
typedef struct Asr_softc {
	device_t		ha_dev;        /* newbus device handle */
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* blink-LED code offset (see ASR_getBlinkLedCode) */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window (split-BAR firmware) */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message frame window */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;  /* shared DMA status area */
	u_int32_t		ha_rstatus_phys; /* bus address of ->rstatus */
	u_int32_t		ha_status_phys;  /* bus address of ->status */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;       /* interrupt cookie */
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Aliases into an LCT entry's IdentityTag bytes, plus le_type values */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1]; /* per-bus TID lookup */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter state, one of: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	STAILQ_ENTRY(Asr_softc) ha_next;       /* HBA list */
	struct cdev *ha_devt;                  /* control device node */
} Asr_softc_t;
384
385static STAILQ_HEAD(, Asr_softc) Asr_softc_list =
386	STAILQ_HEAD_INITIALIZER(Asr_softc_list);
387
388static __inline void
389set_ccb_timeout_ch(union asr_ccb *ccb, struct callout_handle ch)
390{
391	ccb->ccb_h.sim_priv.entries[0].ptr = ch.callout;
392}
393
394static __inline struct callout_handle
395get_ccb_timeout_ch(union asr_ccb *ccb)
396{
397	struct callout_handle ch;
398
399	ch.callout = ccb->ccb_h.sim_priv.entries[0].ptr;
400	return ch;
401}
402
403/*
404 *	Prototypes of the routines we have in this object.
405 */
406
407/* I2O HDM interface */
408static int	asr_probe(device_t dev);
409static int	asr_attach(device_t dev);
410
411static int	asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
412			  struct thread *td);
413static int	asr_open(struct cdev *dev, int32_t flags, int32_t ifmt,
414			 struct thread *td);
415static int	asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td);
416static int	asr_intr(Asr_softc_t *sc);
417static void	asr_timeout(void *arg);
418static int	ASR_init(Asr_softc_t *sc);
419static int	ASR_acquireLct(Asr_softc_t *sc);
420static int	ASR_acquireHrt(Asr_softc_t *sc);
421static void	asr_action(struct cam_sim *sim, union ccb *ccb);
422static void	asr_poll(struct cam_sim *sim);
423static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
424
425/*
426 *	Here is the auto-probe structure used to nest our tests appropriately
427 *	during the startup phase of the operating system.
428 */
429static device_method_t asr_methods[] = {
430	DEVMETHOD(device_probe,	 asr_probe),
431	DEVMETHOD(device_attach, asr_attach),
432	{ 0, 0 }
433};
434
435static driver_t asr_driver = {
436	"asr",
437	asr_methods,
438	sizeof(Asr_softc_t)
439};
440
441static devclass_t asr_devclass;
442DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
443MODULE_DEPEND(asr, pci, 1, 1, 1);
444MODULE_DEPEND(asr, cam, 1, 1, 1);
445
446/*
447 * devsw for asr hba driver
448 *
449 * only ioctl is used. the sd driver provides all other access.
450 */
451static struct cdevsw asr_cdevsw = {
452	.d_version =	D_VERSION,
453	.d_flags =	D_NEEDGIANT,
454	.d_open =	asr_open,
455	.d_close =	asr_close,
456	.d_ioctl =	asr_ioctl,
457	.d_name =	"asr",
458};
459
460/* I2O support routines */
461
/* Read the adapter's outbound (From-IOP) reply FIFO register. */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}
468
/*
 * Read the inbound (To-IOP) FIFO: yields a free message frame offset,
 * or EMPTY_QUEUE when none is available (see ASR_getMessage).
 */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}
475
/* Read the interrupt mask register (see Mask_InterruptsDisabled). */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}
482
/* Read the IOP status register. */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}
489
/* Write to the outbound (From-IOP) FIFO register. */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
			  val);
}
496
/*
 * Post a message frame offset to the inbound (To-IOP) FIFO, handing the
 * frame to the adapter for execution (see ASR_initiateCp).
 */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
			  val);
}
503
/* Write the interrupt mask register. */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
			  val);
}
510
/*
 * Copy `len' 32-bit words of `frame' into adapter frame memory at `offset'.
 * Note: len is a word count -- callers pass getMessageSize() directly.
 */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}
517
518/*
519 *	Fill message with default.
520 */
521static PI2O_MESSAGE_FRAME
522ASR_fillMessage(void *Message, u_int16_t size)
523{
524	PI2O_MESSAGE_FRAME Message_Ptr;
525
526	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
527	bzero(Message_Ptr, size);
528	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
529	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
530	  (size + sizeof(U32) - 1) >> 2);
531	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
532	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
533	return (Message_Ptr);
534} /* ASR_fillMessage */
535
536#define	EMPTY_QUEUE (0xffffffff)
537
538static __inline U32
539ASR_getMessage(Asr_softc_t *sc)
540{
541	U32	MessageOffset;
542
543	MessageOffset = asr_get_ToFIFO(sc);
544	if (MessageOffset == EMPTY_QUEUE)
545		MessageOffset = asr_get_ToFIFO(sc);
546
547	return (MessageOffset);
548} /* ASR_getMessage */
549
550/* Issue a polled command */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;	/* sentinel: no frame acquired */
	U32	MessageOffset;
	u_int	Delay = 1500;		/* up to 1500 * 10ms = 15s of polling */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the request into the frame the adapter handed us */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	/* Returns the pre-disable interrupt mask, or 0xffffffff on failure */
	return (Mask);
} /* ASR_initiateCp */
579
580/*
581 *	Reset the adapter.
582 */
583static U32
584ASR_resetIOP(Asr_softc_t *sc)
585{
586	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
587	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
588	U32			       * Reply_Ptr;
589	U32				 Old;
590
591	/*
592	 *  Build up our copy of the Message.
593	 */
594	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
595	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
596	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
597	/*
598	 *  Reset the Reply Status
599	 */
600	Reply_Ptr = &sc->ha_statusmem->rstatus;
601	*Reply_Ptr = 0;
602	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
603	    sc->ha_rstatus_phys);
604	/*
605	 *	Send the Message out
606	 */
607	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
608	     0xffffffff) {
609		/*
610		 * Wait for a response (Poll), timeouts are dangerous if
611		 * the card is truly responsive. We assume response in 2s.
612		 */
613		u_int8_t Delay = 200;
614
615		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
616			DELAY (10000);
617		}
618		/*
619		 *	Re-enable the interrupts.
620		 */
621		asr_set_intr(sc, Old);
622		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
623		return(*Reply_Ptr);
624	}
625	KASSERT(Old != 0xffffffff, ("Old == -1"));
626	return (0);
627} /* ASR_resetIOP */
628
629/*
 *	Get the current state of the adapter
631 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We allow up to 255 polls of
		 * 1ms each; some 2000S/2005S adapters need far more than the
		 * original 50ms budget (see the V1.08 changelog above).
		 */
		u_int8_t Delay = 255;

		/* A volatile read of SyncByte going non-zero is treated as
		 * "reply arrived" -- presumably the adapter fills SyncByte
		 * last; confirm against the I2O spec. */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
683
684/*
685 *	Check if the device is a SCSI I2O HBA, and add it to the list.
686 */
687
/*
 * Probe for an ASR controller.  If we find one, we will use it.
 */
692static int
693asr_probe(device_t dev)
694{
695	u_int32_t id;
696
697	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
698	if ((id == 0xA5011044) || (id == 0xA5111044)) {
699		device_set_desc(dev, "Adaptec Caching SCSI RAID");
700		return (BUS_PROBE_DEFAULT);
701	}
702	return (ENXIO);
703} /* asr_probe */
704
705static __inline union asr_ccb *
706asr_alloc_ccb(Asr_softc_t *sc)
707{
708	union asr_ccb *new_ccb;
709
710	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
711	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
712		new_ccb->ccb_h.pinfo.priority = 1;
713		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
714		new_ccb->ccb_h.spriv_ptr0 = sc;
715	}
716	return (new_ccb);
717} /* asr_alloc_ccb */
718
/* Release a ccb obtained from asr_alloc_ccb. */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
724
725/*
726 *	Print inquiry data `carefully'
727 */
728static void
729ASR_prstring(u_int8_t *s, int len)
730{
731	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
732		printf ("%c", *(s++));
733	}
734} /* ASR_prstring */
735
736/*
737 *	Send a message synchronously and without Interrupt to a ccb.
738 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	int		s;
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.  The ccb pointer rides in the 64-bit
	 * context -- presumably asr_intr uses it to locate the ccb on
	 * completion; confirm against the interrupt handler.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* No free frame: mark the ccb for resubmission */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.  With
	 * interrupts masked, completions are reaped by polling asr_intr()
	 * directly until the ccb leaves CAM_REQ_INPROG.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
775
776/*
777 *	Send a message synchronously to an Asr_softc_t.
778 */
779static int
780ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
781{
782	union asr_ccb	*ccb;
783	int		status;
784
785	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
786		return (CAM_REQUEUE_REQ);
787	}
788
789	status = ASR_queue_s (ccb, Message);
790
791	asr_free_ccb(ccb);
792
793	return (status);
794} /* ASR_queue_c */
795
796/*
797 *	Add the specified ccb to the active queue
798 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	/* splcam protects the ha_ccb list (see the V1.04 changelog) */
	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flushes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* six minutes */
		}
		/* Arm a watchdog; the handle is stashed in the ccb so
		 * ASR_ccbRemove can cancel it. */
		set_ccb_timeout_ch(ccb, timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000));
	}
	splx(s);
} /* ASR_ccbAdd */
820
821/*
822 *	Remove the specified ccb from the active queue.
823 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	/* Cancel the watchdog armed by ASR_ccbAdd before unlinking */
	untimeout(asr_timeout, (caddr_t)ccb, get_ccb_timeout_ch(ccb));
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */
834
835/*
836 *	Fail all the active commands, so they get re-issued by the operating
837 *	system.
838 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;
	int		s;

	s = splcam();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	/* Always re-fetch the head: ASR_ccbRemove unlinks each entry (the
	 * earlier iterate-while-removing loop was flawed, per V1.05). */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* No path: an internal command -- presumably a
			 * synchronous submitter sleeps on the ccb; confirm. */
			wakeup (ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
873
874/*
875 *	The following command causes the HBA to reset the specific bus
876 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Walk the LCT for the port entry matching `bus' and direct the
	 * reset at its local TID. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
902
903static __inline int
904ASR_getBlinkLedCode(Asr_softc_t *sc)
905{
906	U8	blink;
907
908	if (sc == NULL)
909		return (0);
910
911	blink = bus_space_read_1(sc->ha_frame_btag,
912				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
913	if (blink != 0xBC)
914		return (0);
915
916	blink = bus_space_read_1(sc->ha_frame_btag,
917				 sc->ha_frame_bhandle, sc->ha_blinkLED);
918	return (blink);
919} /* ASR_getBlinkCode */
920
/*
 *	Determine the address of a TID lookup. Must be done at high priority
 *	since the address can be changed by other threads of execution.
 *
 *	Returns NULL pointer if not indexable (but will attempt to generate
 *	an index if `new_entry' flag is set to TRUE).
 *
 *	All addressable entries are to be guaranteed zero if never initialized.
 */
930static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;
	lun2tid_t	*target_ptr;
	unsigned	new_size;

	/*
	 *	Return the address of the cached TID slot for the
	 * (bus, target, lun) triple, growing the per-bus target table and
	 * the per-target lun table on demand.  When new_entry is FALSE a
	 * missing or undersized table is treated as "not found" and NULL
	 * is returned instead of allocating.
	 */
	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	/* Round the requested target index up to a BUS_CHUNK boundary. */
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		/*
		 * NOTE(review): with M_WAITOK, kernel malloc(9) does not
		 * return NULL, so the NULL arms here are defensive only.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		/* size records usable entries: LUN[0 .. size-1] are valid. */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	/* lun 0 stays at size 0 (the single in-struct element suffices). */
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1059
1060/*
1061 *	Get a pre-existing TID relationship.
1062 *
1063 *	If the TID was never set, return (tid_t)-1.
1064 *
1065 *	should use mutex rather than spl.
1066 */
1067static __inline tid_t
1068ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1069{
1070	tid_t	*tid_ptr;
1071	int	s;
1072	tid_t	retval;
1073
1074	s = splcam();
1075	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1076	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1077	 || (*tid_ptr == (tid_t)0)) {
1078		splx(s);
1079		return ((tid_t)-1);
1080	}
1081	retval = *tid_ptr;
1082	splx(s);
1083	return (retval);
1084} /* ASR_getTid */
1085
1086/*
1087 *	Set a TID relationship.
1088 *
1089 *	If the TID was not set, return (tid_t)-1.
1090 *
1091 *	should use mutex rather than spl.
1092 */
1093static __inline tid_t
1094ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1095{
1096	tid_t	*tid_ptr;
1097	int	s;
1098
1099	if (TID != (tid_t)-1) {
1100		if (TID == 0) {
1101			return ((tid_t)-1);
1102		}
1103		s = splcam();
1104		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1105		 == NULL) {
1106			splx(s);
1107			return ((tid_t)-1);
1108		}
1109		*tid_ptr = TID;
1110		splx(s);
1111	}
1112	return (TID);
1113} /* ASR_setTid */
1114
1115/*-------------------------------------------------------------------------*/
1116/*		      Function ASR_rescan				   */
1117/*-------------------------------------------------------------------------*/
1118/* The Parameters Passed To This Function Are :				   */
1119/*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1120/*									   */
1121/* This Function Will rescan the adapter and resynchronize any data	   */
1122/*									   */
1123/* Return : 0 For OK, Error Code Otherwise				   */
1124/*-------------------------------------------------------------------------*/
1125
1126static int
1127ASR_rescan(Asr_softc_t *sc)
1128{
1129	int bus;
1130	int error;
1131
1132	/*
1133	 * Re-acquire the LCT table and synchronize us to the adapter.
1134	 */
1135	if ((error = ASR_acquireLct(sc)) == 0) {
1136		error = ASR_acquireHrt(sc);
1137	}
1138
1139	if (error != 0) {
1140		return error;
1141	}
1142
1143	bus = sc->ha_MaxBus;
1144	/* Reset all existing cached TID lookups */
1145	do {
1146		int target, event = 0;
1147
1148		/*
1149		 *	Scan for all targets on this bus to see if they
1150		 * got affected by the rescan.
1151		 */
1152		for (target = 0; target <= sc->ha_MaxId; ++target) {
1153			int lun;
1154
1155			/* Stay away from the controller ID */
1156			if (target == sc->ha_adapter_target[bus]) {
1157				continue;
1158			}
1159			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1160				PI2O_LCT_ENTRY Device;
1161				tid_t	       TID = (tid_t)-1;
1162				tid_t	       LastTID;
1163
1164				/*
1165				 * See if the cached TID changed. Search for
1166				 * the device in our new LCT.
1167				 */
1168				for (Device = sc->ha_LCT->LCTEntry;
1169				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1170				   + I2O_LCT_getTableSize(sc->ha_LCT));
1171				  ++Device) {
1172					if ((Device->le_type != I2O_UNKNOWN)
1173					 && (Device->le_bus == bus)
1174					 && (Device->le_target == target)
1175					 && (Device->le_lun == lun)
1176					 && (I2O_LCT_ENTRY_getUserTID(Device)
1177					  == 0xFFF)) {
1178						TID = I2O_LCT_ENTRY_getLocalTID(
1179						  Device);
1180						break;
1181					}
1182				}
1183				/*
1184				 * Indicate to the OS that the label needs
1185				 * to be recalculated, or that the specific
1186				 * open device is no longer valid (Merde)
1187				 * because the cached TID changed.
1188				 */
1189				LastTID = ASR_getTid (sc, bus, target, lun);
1190				if (LastTID != TID) {
1191					struct cam_path * path;
1192
1193					if (xpt_create_path(&path,
1194					  /*periph*/NULL,
1195					  cam_sim_path(sc->ha_sim[bus]),
1196					  target, lun) != CAM_REQ_CMP) {
1197						if (TID == (tid_t)-1) {
1198							event |= AC_LOST_DEVICE;
1199						} else {
1200							event |= AC_INQ_CHANGED
1201							       | AC_GETDEV_CHANGED;
1202						}
1203					} else {
1204						if (TID == (tid_t)-1) {
1205							xpt_async(
1206							  AC_LOST_DEVICE,
1207							  path, NULL);
1208						} else if (LastTID == (tid_t)-1) {
1209							struct ccb_getdev ccb;
1210
1211							xpt_setup_ccb(
1212							  &(ccb.ccb_h),
1213							  path, /*priority*/5);
1214							xpt_async(
1215							  AC_FOUND_DEVICE,
1216							  path,
1217							  &ccb);
1218						} else {
1219							xpt_async(
1220							  AC_INQ_CHANGED,
1221							  path, NULL);
1222							xpt_async(
1223							  AC_GETDEV_CHANGED,
1224							  path, NULL);
1225						}
1226					}
1227				}
1228				/*
1229				 *	We have the option of clearing the
1230				 * cached TID for it to be rescanned, or to
1231				 * set it now even if the device never got
1232				 * accessed. We chose the later since we
1233				 * currently do not use the condition that
1234				 * the TID ever got cached.
1235				 */
1236				ASR_setTid (sc, bus, target, lun, TID);
1237			}
1238		}
1239		/*
1240		 *	The xpt layer can not handle multiple events at the
1241		 * same call.
1242		 */
1243		if (event & AC_LOST_DEVICE) {
1244			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1245		}
1246		if (event & AC_INQ_CHANGED) {
1247			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1248		}
1249		if (event & AC_GETDEV_CHANGED) {
1250			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1251		}
1252	} while (--bus >= 0);
1253	return (error);
1254} /* ASR_rescan */
1255
1256/*-------------------------------------------------------------------------*/
1257/*		      Function ASR_reset				   */
1258/*-------------------------------------------------------------------------*/
1259/* The Parameters Passed To This Function Are :				   */
1260/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1261/*									   */
1262/* This Function Will reset the adapter and resynchronize any data	   */
1263/*									   */
1264/* Return : None							   */
1265/*-------------------------------------------------------------------------*/
1266
1267static int
1268ASR_reset(Asr_softc_t *sc)
1269{
1270	int s, retVal;
1271
1272	s = splcam();
1273	if ((sc->ha_in_reset == HA_IN_RESET)
1274	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1275		splx (s);
1276		return (EBUSY);
1277	}
1278	/*
1279	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1280	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1281	 */
1282	++(sc->ha_in_reset);
1283	if (ASR_resetIOP(sc) == 0) {
1284		debug_asr_printf ("ASR_resetIOP failed\n");
1285		/*
1286		 *	We really need to take this card off-line, easier said
1287		 * than make sense. Better to keep retrying for now since if a
1288		 * UART cable is connected the blinkLEDs the adapter is now in
1289		 * a hard state requiring action from the monitor commands to
1290		 * the HBA to continue. For debugging waiting forever is a
1291		 * good thing. In a production system, however, one may wish
1292		 * to instead take the card off-line ...
1293		 */
1294		/* Wait Forever */
1295		while (ASR_resetIOP(sc) == 0);
1296	}
1297	retVal = ASR_init (sc);
1298	splx (s);
1299	if (retVal != 0) {
1300		debug_asr_printf ("ASR_init failed\n");
1301		sc->ha_in_reset = HA_OFF_LINE;
1302		return (ENXIO);
1303	}
1304	if (ASR_rescan (sc) != 0) {
1305		debug_asr_printf ("ASR_rescan failed\n");
1306	}
1307	ASR_failActiveCommands (sc);
1308	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1309		printf ("asr%d: Brining adapter back on-line\n",
1310		  sc->ha_path[0]
1311		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1312		    : 0);
1313	}
1314	sc->ha_in_reset = HA_OPERATIONAL;
1315	return (0);
1316} /* ASR_reset */
1317
1318/*
1319 *	Device timeout handler.
1320 */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	/* `s' holds the blink-LED code first, then the saved spl level. */
	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			/* ENXIO: init failed; re-arm this timeout to retry. */
			set_ccb_timeout_ch(ccb, timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000));
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on this ccb: escalate to an adapter reset. */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			set_ccb_timeout_ch(ccb, timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000));
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	/* Mark the ccb TIMEOUT so a repeat fire takes the branch above. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	set_ccb_timeout_ch(ccb, timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000));
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */
1373
1374/*
1375 * send a message asynchronously
1376 */
1377static int
1378ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1379{
1380	U32		MessageOffset;
1381	union asr_ccb	*ccb;
1382
1383	debug_asr_printf("Host Command Dump:\n");
1384	debug_asr_dump_message(Message);
1385
1386	ccb = (union asr_ccb *)(long)
1387	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1388
1389	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1390		asr_set_frame(sc, Message, MessageOffset,
1391			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1392		if (ccb) {
1393			ASR_ccbAdd (sc, ccb);
1394		}
1395		/* Post the command */
1396		asr_set_ToFIFO(sc, MessageOffset);
1397	} else {
1398		if (ASR_getBlinkLedCode(sc)) {
1399			/*
1400			 *	Unlikely we can do anything if we can't grab a
1401			 * message frame :-(, but lets give it a try.
1402			 */
1403			(void)ASR_reset(sc);
1404		}
1405	}
1406	return (MessageOffset);
1407} /* ASR_queue */
1408
1409
/* Simple Scatter Gather elements */
/*
 * SG(SGL, Index, Flags, Buffer, Size) -- fill in simple SG element `Index'
 * of list `SGL': set the byte count to Size, OR the caller's Flags into the
 * mandatory SIMPLE_ADDRESS_ELEMENT flag, and store the physical address of
 * Buffer (0 when Buffer is NULL).
 *
 * Cautions: expands to three statements with no do/while(0) wrapper, so it
 * must not be used as the sole body of an unbraced `if'; Buffer is
 * evaluated twice, so avoid arguments with side effects.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1421
1422/*
1423 *	Retrieve Parameter Group.
1424 */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	/*
	 * Fetch parameter group `Group' from device `TID' via a
	 * UtilParamsGet message.  The operation list rides in the first SG
	 * element; results land in the caller-supplied Buffer, which must
	 * be laid out as struct ParamBuffer (header, read result, data).
	 * Returns a pointer to the data (Info) area inside Buffer, or NULL
	 * if the request failed or produced no results.
	 */
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* Filler so O lands after two simple SG elements. */
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	/* The frame proper is followed by room for two simple SG elements. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* Single FIELD_GET operation asking for every field of the group. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Success requires both queue completion and at least one result. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1488
1489/*
1490 *	Acquire the LCT information.
1491 */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	/*
	 * Two-pass fetch: (1) ask the IOP for the LCT with a header-only
	 * buffer so the reply tells us the real table size; (2) allocate
	 * the full table and re-issue the request with an SG chain built
	 * over the allocation.  Finally, annotate each entry with the
	 * driver-local le_type/le_bus/le_target/le_lun fields.
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	/* Pass 1: header-sized on-stack Table receives the size probe. */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	/* Reject a header-only (empty) table or an implausibly large one. */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 */
	/*
	 * Pass 2: one simple SG element per physically contiguous run of
	 * the buffer, growing the message frame as elements are appended.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of the SG chain. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame; re-point sg into the new copy. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				free(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				free(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			free(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each entry and record its driver-local addressing. */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		/*
		 * NOTE: port-class entries fall through to default and
		 * `continue', so only the BSA/SCSI/FCA cases above (which
		 * `break') reach the DeviceInfo lookup below.
		 */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen (bounded). */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1735
1736/*
1737 * Initialize a message frame.
1738 * We assume that the CDB has already been set up, so all we do here is
1739 * generate the Scatter Gather list.
1740 */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/*
	 *	Build a PRIVATE_SCSI_SCB_EXECUTE message for the ccb:
	 * resolve the TID, fill in the frame header and flags, copy the
	 * CDB, and construct the data + sense SG list.  Returns NULL when
	 * no TID exists for the addressed device.
	 */
	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/*
	 * TID cache miss: search the LCT for a matching (bus,target,lun)
	 * with no user TID claimed (0xFFF), and cache the result.
	 */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No device at this address: the caller must fail the ccb. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* Nonzero rw => transfer TO the device (see XFER_TO_DEVICE below). */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/*
	 * One simple SG element per physically contiguous run, capped at
	 * SG_SIZE elements.
	 * NOTE(review): if the transfer needs more than SG_SIZE elements,
	 * len > 0 remains unmapped here -- presumably bounded elsewhere;
	 * verify against the caller's transfer-size limits.
	 */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
/*
 *	Initialize the adapter's outbound (reply) message FIFO.
 */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	/*
	 * Initialize the adapter's outbound (reply) FIFO: issue an
	 * EXEC_OUTBOUND_INIT message, poll the DMA'd status word until the
	 * IOP posts a terminal state, allocate the reply-frame pool on the
	 * first call, and prime the FIFO with each frame's physical
	 * address.  Returns the final status word, or 0 if the message
	 * could not be initiated.
	 */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status
	 */
	/* The status word lives in `R', right after the message proper. */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 */
		/* Busy-wait until the IOP writes a status >= REJECTED. */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
		for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		    size; --size) {
			asr_set_FromFIFO(sc, addr);
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
1969
1970/*
1971 *	Set the system table
1972 */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha, *next;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	/*
	 * Build the SysTab header; one table entry is advertised for
	 * every adapter currently on the global softc list.
	 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL on
	 * FreeBSD, so these NULL checks are dead but harmless.
	 */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	STAILQ_FOREACH(ha, &Asr_softc_list, ha_next) {
		++SystemTable->NumberEntries;
	}
	/*
	 * Size the message frame for an SGL of one simple element for the
	 * header, one per adapter system-table entry, and two trailing
	 * empty elements (hence the "3 +" below).
	 */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == NULL) {
		free(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One SG element per adapter; the last one ends that buffer. */
	STAILQ_FOREACH_SAFE(ha, &Asr_softc_list, ha_next, next) {
		SG(sg, 0,
		  ((next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/*
	 * Two empty trailing buffers — presumably the SysTabSet private
	 * memory/IO space descriptors required by the I2O spec; verify
	 * against the specification.
	 */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free(Message_Ptr, M_TEMP);
	free(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2033
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use the
 * adapter IDs in it to assign bus numbers to the matching LCT entries,
 * updating sc->ha_MaxBus with the highest bus number seen.
 * Returns 0 on success, ENODEV if the ExecHrtGet command fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt;
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries our on-stack buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/*
		 * Walk the LCT; the adapter ID's low 12 bits carry the
		 * TID and the high 16 bits carry the bus number.
		 */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2091
2092/*
2093 *	Enable the adapter.
2094 */
2095static int
2096ASR_enableSys(Asr_softc_t *sc)
2097{
2098	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2099	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2100
2101	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2102	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2103	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2104	  I2O_EXEC_SYS_ENABLE);
2105	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2106} /* ASR_enableSys */
2107
2108/*
2109 *	Perform the stages necessary to initialize the adapter
2110 */
2111static int
2112ASR_init(Asr_softc_t *sc)
2113{
2114	return ((ASR_initOutBound(sc) == 0)
2115	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2116	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2117} /* ASR_init */
2118
2119/*
2120 *	Send a Synchronize Cache command to the target device.
2121 */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		/* Build the private SCB-execute frame on the stack. */
		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		/*
		 * NOTE(review): SCBFlags is set again below with the
		 * direction bit added; this first set looks redundant if
		 * the accessor overwrites rather than ORs — confirm.
		 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB, legacy LUN in byte 1. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2186
2187static void
2188ASR_synchronize(Asr_softc_t *sc)
2189{
2190	int bus, target, lun;
2191
2192	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2193		for (target = 0; target <= sc->ha_MaxId; ++target) {
2194			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2195				ASR_sync(sc,bus,target,lun);
2196			}
2197		}
2198	}
2199}
2200
2201/*
2202 *	Reset the HBA, targets and BUS.
2203 *		Currently this resets *all* the SCSI busses.
2204 */
2205static __inline void
2206asr_hbareset(Asr_softc_t *sc)
2207{
2208	ASR_synchronize(sc);
2209	(void)ASR_reset(sc);
2210} /* asr_hbareset */
2211
2212/*
2213 *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2214 * limit and a reduction in error checking (in the pre 4.0 case).
2215 */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {	/* bit 0 clear => memory BAR */
			break;
		}
	}
	/*
	 *	Give up?
	 */
	if (rid >= 4) {
		rid = 0;
	}
	rid = PCIR_BAR(rid);
	/*
	 * Size the BAR the classic way: write all-ones, read back the
	 * decoded mask, then restore the original base address.
	 */
	p = pci_read_config(dev, rid, sizeof(p));
	pci_write_config(dev, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets its size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip the BAR's low flag bits to get the base */
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* Map the next BAR separately for the messaging unit. */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Single-BAR card: message frames share the I2O window. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2297
2298/*
2299 *	A simplified copy of the real pci_map_int with additional
2300 * registration requirements.
2301 */
2302static int
2303asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2304{
2305	int rid = 0;
2306
2307	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2308	  RF_ACTIVE | RF_SHAREABLE);
2309	if (sc->ha_irq_res == NULL) {
2310		return (0);
2311	}
2312	if (bus_setup_intr(dev, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
2313	  NULL, (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2314		return (0);
2315	}
2316	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2317	return (1);
2318} /* asr_pci_map_int */
2319
2320static void
2321asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2322{
2323	Asr_softc_t *sc;
2324
2325	if (error)
2326		return;
2327
2328	sc = (Asr_softc_t *)arg;
2329
2330	/* XXX
2331	 * The status word can be at a 64-bit address, but the existing
2332	 * accessor macros simply cannot manipulate 64-bit addresses.
2333	 */
2334	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2335	    offsetof(struct Asr_status_mem, status);
2336	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2337	    offsetof(struct Asr_status_mem, rstatus);
2338}
2339
/*
 *	Create the parent and status-block DMA tags, then allocate and map
 * the shared status memory.  Returns 0 on success or ENOMEM, cleaning up
 * anything already created on the failure paths.
 */
static int
asr_alloc_dma(Asr_softc_t *sc)
{
	device_t dev;

	dev = sc->ha_dev;

	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* lockfunc, lockarg */
			       &sc->ha_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sizeof(sc->ha_statusmem),/* maxsize */
			       1,			/* nsegments */
			       sizeof(sc->ha_statusmem),/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* lockfunc, lockarg */
			       &sc->ha_statusmem_dmat)) {
		device_printf(dev, "Cannot allocate status DMA tag\n");
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
		device_printf(dev, "Cannot allocate status memory\n");
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}
	/*
	 * asr_status_cb() records the physical addresses.  NOTE(review):
	 * the load return value is discarded; with a single segment the
	 * callback is presumably invoked synchronously — confirm.
	 */
	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);

	return (0);
}
2390
/*
 *	Undo asr_alloc_dma() in reverse order.  Each step is guarded so the
 * function is safe to call from any partially-completed attach path;
 * ha_rstatus_phys != 0 doubles as "the status map was loaded".
 */
static void
asr_release_dma(Asr_softc_t *sc)
{

	if (sc->ha_rstatus_phys != 0)
		bus_dmamap_unload(sc->ha_statusmem_dmat,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem != NULL)
		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
	if (sc->ha_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_parent_dmat);
}
2406
2407/*
2408 *	Attach the devices, and virtual devices to the driver list.
2409 */
2410static int
2411asr_attach(device_t dev)
2412{
2413	PI2O_EXEC_STATUS_GET_REPLY status;
2414	PI2O_LCT_ENTRY		 Device;
2415	Asr_softc_t		 *sc;
2416	struct scsi_inquiry_data *iq;
2417	int			 bus, size, unit;
2418	int			 error;
2419
2420	sc = device_get_softc(dev);
2421	unit = device_get_unit(dev);
2422	sc->ha_dev = dev;
2423
2424	if (STAILQ_EMPTY(&Asr_softc_list)) {
2425		/*
2426		 *	Fixup the OS revision as saved in the dptsig for the
2427		 *	engine (dptioctl.h) to pick up.
2428		 */
2429		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2430	}
2431	/*
2432	 *	Initialize the software structure
2433	 */
2434	LIST_INIT(&(sc->ha_ccb));
2435	/* Link us into the HA list */
2436	STAILQ_INSERT_TAIL(&Asr_softc_list, sc, ha_next);
2437
2438	/*
2439	 *	This is the real McCoy!
2440	 */
2441	if (!asr_pci_map_mem(dev, sc)) {
2442		device_printf(dev, "could not map memory\n");
2443		return(ENXIO);
2444	}
2445	/* Enable if not formerly enabled */
2446	pci_enable_busmaster(dev);
2447
2448	sc->ha_pciBusNum = pci_get_bus(dev);
2449	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2450
2451	if ((error = asr_alloc_dma(sc)) != 0)
2452		return (error);
2453
2454	/* Check if the device is there? */
2455	if (ASR_resetIOP(sc) == 0) {
2456		device_printf(dev, "Cannot reset adapter\n");
2457		asr_release_dma(sc);
2458		return (EIO);
2459	}
2460	status = &sc->ha_statusmem->status;
2461	if (ASR_getStatus(sc) == NULL) {
2462		device_printf(dev, "could not initialize hardware\n");
2463		asr_release_dma(sc);
2464		return(ENODEV);
2465	}
2466	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2467	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2468	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2469	sc->ha_SystemTable.IopState = status->IopState;
2470	sc->ha_SystemTable.MessengerType = status->MessengerType;
2471	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2472	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2473	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2474
2475	if (!asr_pci_map_int(dev, (void *)sc)) {
2476		device_printf(dev, "could not map interrupt\n");
2477		asr_release_dma(sc);
2478		return(ENXIO);
2479	}
2480
2481	/* Adjust the maximim inbound count */
2482	if (((sc->ha_QueueSize =
2483	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2484	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2485		sc->ha_QueueSize = MAX_INBOUND;
2486	}
2487
2488	/* Adjust the maximum outbound count */
2489	if (((sc->ha_Msgs_Count =
2490	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2491	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2492		sc->ha_Msgs_Count = MAX_OUTBOUND;
2493	}
2494	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2495		sc->ha_Msgs_Count = sc->ha_QueueSize;
2496	}
2497
2498	/* Adjust the maximum SG size to adapter */
2499	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2500	    2)) > MAX_INBOUND_SIZE) {
2501		size = MAX_INBOUND_SIZE;
2502	}
2503	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2504	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2505
2506	/*
2507	 *	Only do a bus/HBA reset on the first time through. On this
2508	 * first time through, we do not send a flush to the devices.
2509	 */
2510	if (ASR_init(sc) == 0) {
2511		struct BufferInfo {
2512			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2513			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2514			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2515		} Buffer;
2516		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2517#define FW_DEBUG_BLED_OFFSET 8
2518
2519		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2520		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2521		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2522			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2523			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2524		}
2525		if (ASR_acquireLct(sc) == 0) {
2526			(void)ASR_acquireHrt(sc);
2527		}
2528	} else {
2529		device_printf(dev, "failed to initialize\n");
2530		asr_release_dma(sc);
2531		return(ENXIO);
2532	}
2533	/*
2534	 *	Add in additional probe responses for more channels. We
2535	 * are reusing the variable `target' for a channel loop counter.
2536	 * Done here because of we need both the acquireLct and
2537	 * acquireHrt data.
2538	 */
2539	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2540	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2541		if (Device->le_type == I2O_UNKNOWN) {
2542			continue;
2543		}
2544		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2545			if (Device->le_target > sc->ha_MaxId) {
2546				sc->ha_MaxId = Device->le_target;
2547			}
2548			if (Device->le_lun > sc->ha_MaxLun) {
2549				sc->ha_MaxLun = Device->le_lun;
2550			}
2551		}
2552		if (((Device->le_type & I2O_PORT) != 0)
2553		 && (Device->le_bus <= MAX_CHANNEL)) {
2554			/* Do not increase MaxId for efficiency */
2555			sc->ha_adapter_target[Device->le_bus] =
2556			    Device->le_target;
2557		}
2558	}
2559
2560	/*
2561	 *	Print the HBA model number as inquired from the card.
2562	 */
2563
2564	device_printf(dev, " ");
2565
2566	if ((iq = (struct scsi_inquiry_data *)malloc(
2567	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2568	    NULL) {
2569		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2570		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2571		int					posted = 0;
2572
2573		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2574		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2575		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2576
2577		I2O_MESSAGE_FRAME_setVersionOffset(
2578		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2579		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2580		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2581		I2O_MESSAGE_FRAME_setMessageSize(
2582		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2583		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2584		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2585		    sizeof(U32));
2586		I2O_MESSAGE_FRAME_setInitiatorAddress(
2587		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2588		I2O_MESSAGE_FRAME_setFunction(
2589		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2590		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2591		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2592		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2593		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2594		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2595		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2596		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2597		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2598		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2599		    DPT_ORGANIZATION_ID);
2600		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2601		Message_Ptr->CDB[0] = INQUIRY;
2602		Message_Ptr->CDB[4] =
2603		    (unsigned char)sizeof(struct scsi_inquiry_data);
2604		if (Message_Ptr->CDB[4] == 0) {
2605			Message_Ptr->CDB[4] = 255;
2606		}
2607
2608		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2609		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2610		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2611		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2612		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2613
2614		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2615		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2616		  sizeof(struct scsi_inquiry_data));
2617		SG(&(Message_Ptr->SGL), 0,
2618		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2619		  iq, sizeof(struct scsi_inquiry_data));
2620		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2621
2622		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2623			printf (" ");
2624			ASR_prstring (iq->vendor, 8);
2625			++posted;
2626		}
2627		if (iq->product[0] && (iq->product[0] != ' ')) {
2628			printf (" ");
2629			ASR_prstring (iq->product, 16);
2630			++posted;
2631		}
2632		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2633			printf (" FW Rev. ");
2634			ASR_prstring (iq->revision, 4);
2635			++posted;
2636		}
2637		free(iq, M_TEMP);
2638		if (posted) {
2639			printf (",");
2640		}
2641	}
2642	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2643	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2644
2645	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2646		struct cam_devq	  * devq;
2647		int		    QueueSize = sc->ha_QueueSize;
2648
2649		if (QueueSize > MAX_INBOUND) {
2650			QueueSize = MAX_INBOUND;
2651		}
2652
2653		/*
2654		 *	Create the device queue for our SIM(s).
2655		 */
2656		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2657			continue;
2658		}
2659
2660		/*
2661		 *	Construct our first channel SIM entry
2662		 */
2663		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2664						unit, &Giant,
2665						1, QueueSize, devq);
2666		if (sc->ha_sim[bus] == NULL) {
2667			continue;
2668		}
2669
2670		if (xpt_bus_register(sc->ha_sim[bus], dev, bus) != CAM_SUCCESS){
2671			cam_sim_free(sc->ha_sim[bus],
2672			  /*free_devq*/TRUE);
2673			sc->ha_sim[bus] = NULL;
2674			continue;
2675		}
2676
2677		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2678		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2679		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2680			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2681			cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE);
2682			sc->ha_sim[bus] = NULL;
2683			continue;
2684		}
2685	}
2686
2687	/*
2688	 *	Generate the device node information
2689	 */
2690	sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
2691			       "asr%d", unit);
2692	if (sc->ha_devt != NULL)
2693		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2694	sc->ha_devt->si_drv1 = sc;
2695	return(0);
2696} /* asr_attach */
2697
2698static void
2699asr_poll(struct cam_sim *sim)
2700{
2701	asr_intr(cam_sim_softc(sim));
2702} /* asr_poll */
2703
/*
 *	CAM action entry point: dispatch a CCB from the transport layer.
 * Every path through here completes the CCB with xpt_done() after
 * setting an appropriate status.
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
			 ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB for the completion side. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch ((int)ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
				     cam_sim_bus(sim), ccb->ccb_h.target_id,
				     ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		/* Translate the CCB into an I2O message and queue it. */
		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* Inbound FIFO full; ask CAM to retry. */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

#if (defined(REPORT_LUNS))
	case REPORT_LUNS:
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts = &(ccb->cts);
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		/* Only user settings are reported; current are unavailable. */
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			spi->sync_period = 6; /* 40MHz */
			spi->sync_offset = 15;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET
				   | CTS_SPI_VALID_BUS_WIDTH
				   | CTS_SPI_VALID_DISC;
			scsi->valid = CTS_SCSI_VALID_TQ;

			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Synthesize a CHS geometry from the volume size. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
2914
2915/*
2916 * Handle processing of current CCB as pointed to by the Status.
2917 */
static int
asr_intr(Asr_softc_t *sc)
{
	/* Non-zero once at least one reply has been consumed. */
	int processed;

	/*
	 * Keep draining the outbound (reply) FIFO for as long as the
	 * adapter status register reports work pending.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb			   *ccb;
		u_int				    dsc;
		U32				    ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/* Read the reply FIFO; retry once before concluding empty. */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/*
		 * Translate the bus address returned by the IOP into a
		 * kernel-virtual pointer within our reply message area.
		 */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/*
		 * A FAIL reply means the IOP could not process the original
		 * request; the original message frame is still owned by the
		 * controller and must be returned by overwriting it with a
		 * NOP and re-posting it.
		 */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE	Message;
			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
			U32			MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *  Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *  Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#endif
			/*
			 *  Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
				      sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *  Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 *	Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/*
		 * Map the I2O detailed status code onto the CAM status
		 * expected by the upper layers.
		 */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/*
		 * Residual = requested transfer length minus what the
		 * adapter reports it actually moved.
		 */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/*
				 * Clamp the sense length to the CCB buffer,
				 * the reply frame's sense area, and the
				 * caller-requested sense length.
				 */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				if (size < ccb->csio.sense_len) {
					ccb->csio.sense_resid =
					    ccb->csio.sense_len - size;
				} else {
					ccb->csio.sense_resid = 0;
				}
				bzero(&(ccb->csio.sense_data),
				    sizeof(ccb->csio.sense_data));
				bcopy(Reply->SenseData,
				      &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/* CCBs with a path came from CAM; others are local waiters. */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */
3114
3115#undef QueueSize	/* Grrrr */
3116#undef SG_Size		/* Grrrr */
3117
3118/*
3119 *	Meant to be included at the bottom of asr.c !!!
3120 */
3121
3122/*
3123 *	Included here as hard coded. Done because other necessary include
3124 *	files utilize C++ comment structures which make them a nuisance to
 *	include here just to pick up these three typedefs.
3126 */
3127typedef U32   DPT_TAG_T;
3128typedef U32   DPT_MSG_T;
3129typedef U32   DPT_RTN_T;
3130
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3132#include	"dev/asr/osd_unix.h"
3133
3134#define	asr_unit(dev)	  dev2unit(dev)
3135
3136static u_int8_t ASR_ctlr_held;
3137
3138static int
3139asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
3140{
3141	int		 s;
3142	int		 error;
3143
3144	if (dev->si_drv1 == NULL) {
3145		return (ENODEV);
3146	}
3147	s = splcam ();
3148	if (ASR_ctlr_held) {
3149		error = EBUSY;
3150	} else if ((error = priv_check(td, PRIV_DRIVER)) == 0) {
3151		++ASR_ctlr_held;
3152	}
3153	splx(s);
3154	return (error);
3155} /* asr_open */
3156
/* Close entry point: release the single-open claim taken in asr_open(). */
static int
asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
{

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */
3164
3165
3166/*-------------------------------------------------------------------------*/
3167/*		      Function ASR_queue_i				   */
3168/*-------------------------------------------------------------------------*/
3169/* The Parameters Passed To This Function Are :				   */
3170/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3171/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3172/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3173/*									   */
3174/* This Function Will Take The User Request Packet And Convert It To An	   */
3175/* I2O MSG And Send It Off To The Adapter.				   */
3176/*									   */
3177/* Return : 0 For OK, Error Code Otherwise				   */
3178/*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		/* Variable-length; element is malloc'd oversized below. */
		char			   KernelSpace[sizeof(long)];
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is reporting a fault code. */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free(Message_Ptr, M_TEMP);
	switch (s) {

	/*
	 * The three exec functions below are handled synchronously in
	 * the driver; only a status word is copied back to the user.
	 */
	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-copy the full message frame now that its size is known. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free(Message_Ptr, M_TEMP);
		/*
		 * NOTE(review): `error' is 0 here (the copyin above
		 * succeeded), so the errno printed is stale/meaningless.
		 */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate the local reply buffer, at least a full error frame. */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		free(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	/* Mirror the request's contexts into the reply we will hand back. */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		/* Walk every SG element in the user-supplied frame. */
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/* Allocate a kernel bounce buffer for this element. */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					/* Grow the frame: copy head, then
					 * duplicate the current SG element
					 * to open a slot for the new one. */
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind all kernel SG buffers on failure. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			free(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free(elm, M_TEMP);
		}
		free(Reply_Ptr, M_TEMP);
		free(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	s = splcam();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			splx(s);
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free(elm, M_TEMP);
			}
			free(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, PRIBIO, "asr", hz);
	}
	splx(s);

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		/*
		 * NOTE(review): the guard above adds the sense area back
		 * (sizeof(frame) - SENSE_SZ) but this subtracts it a second
		 * time, which differs by 2*I2O_SCSI_SENSE_DATA_SZ — looks
		 * suspicious; confirm against the DPT engine spec.
		 */
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		/*
		 * NOTE(review): this zeroes the *source* (the ccb sense data
		 * filled in by asr_intr) immediately before copying it into
		 * the user-visible reply, so the reply sense bytes are always
		 * zero.  The bzero was presumably meant for the destination
		 * (Reply_Ptr->SenseData) — confirm before changing.
		 */
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	free(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3625
3626/*----------------------------------------------------------------------*/
3627/*			    Function asr_ioctl			       */
3628/*----------------------------------------------------------------------*/
3629/* The parameters passed to this function are :				*/
3630/*     dev  : Device number.						*/
3631/*     cmd  : Ioctl Command						*/
3632/*     data : User Argument Passed In.					*/
3633/*     flag : Mode Parameter						*/
3634/*     proc : Process Parameter						*/
3635/*									*/
3636/* This function is the user interface into this adapter driver		*/
3637/*									*/
3638/* Return : zero if OK, error code if not				*/
3639/*----------------------------------------------------------------------*/
3640
3641static int
3642asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
3643{
3644	Asr_softc_t	*sc = dev->si_drv1;
3645	int		i, error = 0;
3646#ifdef ASR_IOCTL_COMPAT
3647	int		j;
3648#endif /* ASR_IOCTL_COMPAT */
3649
3650	if (sc != NULL)
3651	switch(cmd) {
3652
3653	case DPT_SIGNATURE:
3654#ifdef ASR_IOCTL_COMPAT
3655#if (dsDescription_size != 50)
3656	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3657#endif
3658		if (cmd & 0xFFFF0000) {
3659			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3660			return (0);
3661		}
3662	/* Traditional version of the ioctl interface */
3663	case DPT_SIGNATURE & 0x0000FFFF:
3664#endif
3665		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3666				sizeof(dpt_sig_S)));
3667
3668	/* Traditional version of the ioctl interface */
3669	case DPT_CTRLINFO & 0x0000FFFF:
3670	case DPT_CTRLINFO: {
3671		struct {
3672			u_int16_t length;
3673			u_int16_t drvrHBAnum;
3674			u_int32_t baseAddr;
3675			u_int16_t blinkState;
3676			u_int8_t  pciBusNum;
3677			u_int8_t  pciDeviceNum;
3678			u_int16_t hbaFlags;
3679			u_int16_t Interrupt;
3680			u_int32_t reserved1;
3681			u_int32_t reserved2;
3682			u_int32_t reserved3;
3683		} CtlrInfo;
3684
3685		bzero(&CtlrInfo, sizeof(CtlrInfo));
3686		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3687		CtlrInfo.drvrHBAnum = asr_unit(dev);
3688		CtlrInfo.baseAddr = sc->ha_Base;
3689		i = ASR_getBlinkLedCode (sc);
3690		if (i == -1)
3691			i = 0;
3692
3693		CtlrInfo.blinkState = i;
3694		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3695		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3696#define	FLG_OSD_PCI_VALID 0x0001
3697#define	FLG_OSD_DMA	  0x0002
3698#define	FLG_OSD_I2O	  0x0004
3699		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3700		CtlrInfo.Interrupt = sc->ha_irq;
3701#ifdef ASR_IOCTL_COMPAT
3702		if (cmd & 0xffff0000)
3703			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3704		else
3705#endif /* ASR_IOCTL_COMPAT */
3706		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3707	}	return (error);
3708
3709	/* Traditional version of the ioctl interface */
3710	case DPT_SYSINFO & 0x0000FFFF:
3711	case DPT_SYSINFO: {
3712		sysInfo_S	Info;
3713#ifdef ASR_IOCTL_COMPAT
3714		char	      * cp;
3715		/* Kernel Specific ptok `hack' */
3716#define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3717
3718		bzero(&Info, sizeof(Info));
3719
3720		/* Appears I am the only person in the Kernel doing this */
3721		outb (0x70, 0x12);
3722		i = inb(0x71);
3723		j = i >> 4;
3724		if (i == 0x0f) {
3725			outb (0x70, 0x19);
3726			j = inb (0x71);
3727		}
3728		Info.drive0CMOS = j;
3729
3730		j = i & 0x0f;
3731		if (i == 0x0f) {
3732			outb (0x70, 0x1a);
3733			j = inb (0x71);
3734		}
3735		Info.drive1CMOS = j;
3736
3737		Info.numDrives = *((char *)ptok(0x475));
3738#else /* ASR_IOCTL_COMPAT */
3739		bzero(&Info, sizeof(Info));
3740#endif /* ASR_IOCTL_COMPAT */
3741
3742		Info.processorFamily = ASR_sig.dsProcessorFamily;
3743#if defined(__i386__)
3744		switch (cpu) {
3745		case CPU_386SX: case CPU_386:
3746			Info.processorType = PROC_386; break;
3747		case CPU_486SX: case CPU_486:
3748			Info.processorType = PROC_486; break;
3749		case CPU_586:
3750			Info.processorType = PROC_PENTIUM; break;
3751		case CPU_686:
3752			Info.processorType = PROC_SEXIUM; break;
3753		}
3754#endif
3755
3756		Info.osType = OS_BSDI_UNIX;
3757		Info.osMajorVersion = osrelease[0] - '0';
3758		Info.osMinorVersion = osrelease[2] - '0';
3759		/* Info.osRevision = 0; */
3760		/* Info.osSubRevision = 0; */
3761		Info.busType = SI_PCI_BUS;
3762		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3763
3764#ifdef ASR_IOCTL_COMPAT
3765		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3766		/* Go Out And Look For I2O SmartROM */
3767		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3768			int k;
3769
3770			cp = ptok(j);
3771			if (*((unsigned short *)cp) != 0xAA55) {
3772				continue;
3773			}
3774			j += (cp[2] * 512) - 2048;
3775			if ((*((u_long *)(cp + 6))
3776			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3777			 || (*((u_long *)(cp + 10))
3778			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3779				continue;
3780			}
3781			cp += 0x24;
3782			for (k = 0; k < 64; ++k) {
3783				if (*((unsigned short *)cp)
3784				 == (' ' + ('v' * 256))) {
3785					break;
3786				}
3787			}
3788			if (k < 64) {
3789				Info.smartROMMajorVersion
3790				    = *((unsigned char *)(cp += 4)) - '0';
3791				Info.smartROMMinorVersion
3792				    = *((unsigned char *)(cp += 2));
3793				Info.smartROMRevision
3794				    = *((unsigned char *)(++cp));
3795				Info.flags |= SI_SmartROMverValid;
3796				Info.flags &= ~SI_NO_SmartROM;
3797				break;
3798			}
3799		}
3800		/* Get The Conventional Memory Size From CMOS */
3801		outb (0x70, 0x16);
3802		j = inb (0x71);
3803		j <<= 8;
3804		outb (0x70, 0x15);
3805		j |= inb(0x71);
3806		Info.conventionalMemSize = j;
3807
3808		/* Get The Extended Memory Found At Power On From CMOS */
3809		outb (0x70, 0x31);
3810		j = inb (0x71);
3811		j <<= 8;
3812		outb (0x70, 0x30);
3813		j |= inb(0x71);
3814		Info.extendedMemSize = j;
3815		Info.flags |= SI_MemorySizeValid;
3816
3817		/* Copy Out The Info Structure To The User */
3818		if (cmd & 0xFFFF0000)
3819			bcopy(&Info, data, sizeof(Info));
3820		else
3821#endif /* ASR_IOCTL_COMPAT */
3822		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3823		return (error); }
3824
3825		/* Get The BlinkLED State */
3826	case DPT_BLINKLED:
3827		i = ASR_getBlinkLedCode (sc);
3828		if (i == -1)
3829			i = 0;
3830#ifdef ASR_IOCTL_COMPAT
3831		if (cmd & 0xffff0000)
3832			bcopy(&i, data, sizeof(i));
3833		else
3834#endif /* ASR_IOCTL_COMPAT */
3835		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3836		break;
3837
3838		/* Send an I2O command */
3839	case I2OUSRCMD:
3840		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3841
3842		/* Reset and re-initialize the adapter */
3843	case I2ORESETCMD:
3844		return (ASR_reset(sc));
3845
3846		/* Rescan the LCT table and resynchronize the information */
3847	case I2ORESCANCMD:
3848		return (ASR_rescan(sc));
3849	}
3850	return (EINVAL);
3851} /* asr_ioctl */
3852