1/*-
2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
4 * All rights reserved.
5 *
6 * TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
11 *
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
22 *
23 * SCSI I2O host adapter driver
24 *
25 *	V1.10 2004/05/05 scottl@freebsd.org
26 *		- Massive cleanup of the driver to remove dead code and
27 *		  non-conformant style.
28 *		- Removed most i386-specific code to make it more portable.
29 *		- Converted to the bus_space API.
30 *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 *		- The 2000S and 2005S do not initialize on some machines,
32 *		  increased timeout to 255ms from 50ms for the StatusGet
33 *		  command.
34 *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 *		- I knew this one was too good to be true. The error return
36 *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 *		  to the bit masked status.
38 *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 *		- The 2005S that was supported is affectionately called the
40 *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41 *		  16MB low-cost configuration, Firmware was forced to go
42 *		  to a Split BAR Firmware. This requires a separate IOP and
43 *		  Messaging base address.
44 *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 *		- Handle support for 2005S Zero Channel RAID solution.
46 *		- System locked up if the Adapter locked up. Do not try
47 *		  to send other commands if the resetIOP command fails. The
48 *		  fail outstanding command discovery loop was flawed as the
49 *		  removal of the command from the list prevented discovering
50 *		  all the commands.
51 *		- Comment changes to clarify driver.
52 *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 *		- We do not use the AC_FOUND_DEV event because of I2O.
54 *		  Removed asr_async.
55 *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 *		  mode as this is confused with competitor adapters in run
59 *		  mode.
60 *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 *		  to prevent operating system panic.
62 *		- moved default major number to 154 from 97.
63 *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65 *		  series that is visible, it's more of an internal code name.
66 *		  remove any visible references within reason for now.
67 *		- bus_ptr->LUN was not correctly zeroed when initially
68 *		  allocated causing a possible panic of the operating system
69 *		  during boot.
70 *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 *		- Code always fails for ASR_getTid affecting performance.
72 *		- initiated a set of changes that resulted from a formal
73 *		  code inspection by Mark_Salyzyn@adaptec.com,
74 *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 *		  Their findings were focussed on the LCT & TID handler, and
77 *		  all resulting changes were to improve code readability,
78 *		  consistency or have a positive effect on performance.
79 *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 *		- Passthrough returned an incorrect error.
81 *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82 *		  on command completion.
83 *		- generate control device nodes using make_dev and delete_dev.
84 *		- Performance affected by TID caching reallocing.
85 *		- Made suggested changes by Justin_Gibbs@adaptec.com
86 *			- use splcam instead of splbio.
87 *			- use cam_imask instead of bio_imask.
88 *			- use u_int8_t instead of u_char.
89 *			- use u_int16_t instead of u_short.
90 *			- use u_int32_t instead of u_long where appropriate.
91 *			- use 64 bit context handler instead of 32 bit.
92 *			- create_ccb should only allocate the worst case
93 *			  requirements for the driver since CAM may evolve
94 *			  making union ccb much larger than needed here.
95 *			  renamed create_ccb to asr_alloc_ccb.
96 *			- go nutz justifying all debug prints as macros
97 *			  defined at the top and remove unsightly ifdefs.
98 *			- INLINE STATIC viewed as confusing. Historically
99 *			  utilized to affect code performance and debug
100 *			  issues in OS, Compiler or OEM specific situations.
101 *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103 *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104 *			changed variable name xs to ccb
105 *			changed struct scsi_link to struct cam_path
106 *			changed struct scsibus_data to struct cam_sim
107 *			stopped using fordriver for holding on to the TID
108 *			use proprietary packet creation instead of scsi_inquire
109 *			CAM layer sends synchronize commands.
110 */
111
112#include <sys/cdefs.h>
113#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
114#include <sys/kernel.h>
115#include <sys/module.h>
116#include <sys/systm.h>
117#include <sys/malloc.h>
118#include <sys/conf.h>
119#include <sys/ioccom.h>
120#include <sys/priv.h>
121#include <sys/proc.h>
122#include <sys/bus.h>
123#include <machine/resource.h>
124#include <machine/bus.h>
125#include <sys/rman.h>
126#include <sys/stat.h>
127#include <sys/bus_dma.h>
128
129#include <cam/cam.h>
130#include <cam/cam_ccb.h>
131#include <cam/cam_sim.h>
132#include <cam/cam_xpt_sim.h>
133
134#include <cam/scsi/scsi_all.h>
135#include <cam/scsi/scsi_message.h>
136
137#include <vm/vm.h>
138#include <vm/pmap.h>
139
140#if defined(__i386__)
141#include "opt_asr.h"
142#include <i386/include/cputypes.h>
143
144#if defined(ASR_COMPAT)
145#define ASR_IOCTL_COMPAT
146#endif /* ASR_COMPAT */
147#endif
148#include <machine/vmparam.h>
149
150#include <dev/pci/pcivar.h>
151#include <dev/pci/pcireg.h>
152
153#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
154#define	KVTOPHYS(x) vtophys(x)
155#include	<dev/asr/dptalign.h>
156#include	<dev/asr/i2oexec.h>
157#include	<dev/asr/i2obscsi.h>
158#include	<dev/asr/i2odpt.h>
159#include	<dev/asr/i2oadptr.h>
160
161#include	<dev/asr/sys_info.h>
162
163__FBSDID("$FreeBSD$");
164
165#define	ASR_VERSION	1
166#define	ASR_REVISION	'1'
167#define	ASR_SUBREVISION '0'
168#define	ASR_MONTH	5
169#define	ASR_DAY		5
170#define	ASR_YEAR	(2004 - 1980)
171
172/*
173 *	Debug macros to reduce the unsightly ifdefs
174 */
175#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
176static __inline void
177debug_asr_message(PI2O_MESSAGE_FRAME message)
178{
179	u_int32_t * pointer = (u_int32_t *)message;
180	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
181	u_int32_t   counter = 0;
182
183	while (length--) {
184		printf("%08lx%c", (u_long)*(pointer++),
185		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
186	}
187}
188#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
189
190#ifdef DEBUG_ASR
  /* Breaks on non-STDC-based compilers :-( */
192#define debug_asr_printf(fmt,args...)	printf(fmt, ##args)
193#define debug_asr_dump_message(message)	debug_asr_message(message)
194#define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
195#else /* DEBUG_ASR */
196#define debug_asr_printf(fmt,args...)
197#define debug_asr_dump_message(message)
198#define debug_asr_print_path(ccb)
199#endif /* DEBUG_ASR */
200
201/*
202 *	If DEBUG_ASR_CMD is defined:
203 *		0 - Display incoming SCSI commands
204 *		1 - add in a quick character before queueing.
205 *		2 - add in outgoing message frames.
206 */
207#if (defined(DEBUG_ASR_CMD))
208#define debug_asr_cmd_printf(fmt,args...)     printf(fmt,##args)
209static __inline void
210debug_asr_dump_ccb(union ccb *ccb)
211{
212	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
213	int		len = ccb->csio.cdb_len;
214
215	while (len) {
216		debug_asr_cmd_printf (" %02x", *(cp++));
217		--len;
218	}
219}
220#if (DEBUG_ASR_CMD > 0)
221#define debug_asr_cmd1_printf		       debug_asr_cmd_printf
222#else
223#define debug_asr_cmd1_printf(fmt,args...)
224#endif
225#if (DEBUG_ASR_CMD > 1)
226#define debug_asr_cmd2_printf			debug_asr_cmd_printf
227#define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
228#else
229#define debug_asr_cmd2_printf(fmt,args...)
230#define debug_asr_cmd2_dump_message(message)
231#endif
232#else /* DEBUG_ASR_CMD */
233#define debug_asr_cmd_printf(fmt,args...)
234#define debug_asr_dump_ccb(ccb)
235#define debug_asr_cmd1_printf(fmt,args...)
236#define debug_asr_cmd2_printf(fmt,args...)
237#define debug_asr_cmd2_dump_message(message)
238#endif /* DEBUG_ASR_CMD */
239
240#if (defined(DEBUG_ASR_USR_CMD))
241#define debug_usr_cmd_printf(fmt,args...)   printf(fmt,##args)
242#define debug_usr_cmd_dump_message(message) debug_usr_message(message)
243#else /* DEBUG_ASR_USR_CMD */
244#define debug_usr_cmd_printf(fmt,args...)
245#define debug_usr_cmd_dump_message(message)
246#endif /* DEBUG_ASR_USR_CMD */
247
248#ifdef ASR_IOCTL_COMPAT
249#define	dsDescription_size 46	/* Snug as a bug in a rug */
250#endif /* ASR_IOCTL_COMPAT */
251
252#include "dev/asr/dptsig.h"
253
/*
 * DPT driver signature block: identifies this driver (file type, version,
 * supported processors/OS/capabilities) to DPT/Adaptec management tools.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
264
265/* Configuration Definitions */
266
267#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
268#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
269#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
270#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
271#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
272#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
273#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
274#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
275				/* Also serves as the minimum map for	 */
276				/* the 2005S zero channel RAID product	 */
277
278/* I2O register set */
279#define	I2O_REG_STATUS		0x30
280#define	I2O_REG_MASK		0x34
281#define	I2O_REG_TOFIFO		0x40
282#define	I2O_REG_FROMFIFO	0x44
283
284#define	Mask_InterruptsDisabled	0x08
285
/*
 * A MIX of performance and space considerations for TID lookups
 *
 * Sparse bus -> target -> LUN -> TID mapping.  Both tables end in a
 * one-element array that is over-allocated at malloc time (see
 * ASR_getTidAddress), with `size' recording the usable element count.
 */
typedef u_int16_t tid_t;

typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];	/* variable length; TID per LUN */
} lun2tid_t;

typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* variable length; LUN table per target */
} target2lun_t;
300
301/*
302 *	To ensure that we only allocate and use the worst case ccb here, lets
303 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
304 *	ccb type, ensure that you add the additional structures into our local
305 *	ccb union. To ensure strict type checking, we will utilize the local
306 *	ccb definition wherever possible.
307 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;   /* SCSI I/O requests */
	struct ccb_setasync csa;    /* CAM async-callback registration */
};
313
/*
 * DMA-visible scratch memory shared with the adapter: the StatusGet
 * reply buffer (see ASR_getStatus) and the IopReset status word
 * (see ASR_resetIOP).
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;
	U32				rstatus;
};
318
319/**************************************************************************
320** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321**  Is Configured Into The System.  The Structure Supplies Configuration **
322**  Information, Status Info, Queue Info And An Active CCB List Pointer. **
323***************************************************************************/
324
typedef struct Asr_softc {
	device_t		ha_dev;
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* offset of blink-LED code in frame window */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window (FIFOs, mask, status) */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message-frame memory window */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;
	u_int32_t		ha_rstatus_phys; /* phys addr of statusmem->rstatus */
	u_int32_t		ha_status_phys;  /* phys addr of statusmem->status */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthands for fields packed into an LCT entry's IdentityTag bytes. */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter state, one of: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	STAILQ_ENTRY(Asr_softc) ha_next;       /* HBA list */
	struct cdev *ha_devt;
} Asr_softc_t;
384
/* Global list of all attached ASR controllers. */
static STAILQ_HEAD(, Asr_softc) Asr_softc_list =
	STAILQ_HEAD_INITIALIZER(Asr_softc_list);
387
388/*
389 *	Prototypes of the routines we have in this object.
390 */
391
392/* I2O HDM interface */
393static int	asr_probe(device_t dev);
394static int	asr_attach(device_t dev);
395
396static int	asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
397			  struct thread *td);
398static int	asr_open(struct cdev *dev, int32_t flags, int32_t ifmt,
399			 struct thread *td);
400static int	asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td);
401static int	asr_intr(Asr_softc_t *sc);
402static void	asr_timeout(void *arg);
403static int	ASR_init(Asr_softc_t *sc);
404static int	ASR_acquireLct(Asr_softc_t *sc);
405static int	ASR_acquireHrt(Asr_softc_t *sc);
406static void	asr_action(struct cam_sim *sim, union ccb *ccb);
407static void	asr_poll(struct cam_sim *sim);
408static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
409
410/*
411 *	Here is the auto-probe structure used to nest our tests appropriately
412 *	during the startup phase of the operating system.
413 */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }	/* terminator */
};

static driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)	/* softc allocated per device by newbus */
};

static devclass_t asr_devclass;
/* Attach on the PCI bus; the module requires both pci and cam. */
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
MODULE_DEPEND(asr, pci, 1, 1, 1);
MODULE_DEPEND(asr, cam, 1, 1, 1);
430
431/*
432 * devsw for asr hba driver
433 *
434 * only ioctl is used. the sd driver provides all other access.
435 */
static struct cdevsw asr_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,	/* serialize entry points under Giant */
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,
	.d_name =	"asr",
};
444
445/* I2O support routines */
446
/* Read the outbound (adapter-to-host) FIFO: yields a reply-frame offset. */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}
453
/* Read the inbound (host-to-adapter) FIFO: yields a free message-frame
 * offset, or EMPTY_QUEUE if none is available. */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}
460
/* Read the current interrupt mask register. */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}
467
/* Read the IOP status register. */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}
474
/* Write `val' to the outbound FIFO register (returns a reply frame to
 * the adapter). */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
			  val);
}
481
/* Post a message-frame offset to the inbound FIFO, handing the frame
 * to the adapter for processing. */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
			  val);
}
488
/* Write the interrupt mask register (see Mask_InterruptsDisabled). */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
			  val);
}
495
/* Copy `len' 32-bit words from `frame' into adapter frame memory at
 * `offset' (callers pass the frame's MessageSize, which is in words). */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}
502
503/*
504 *	Fill message with default.
505 */
506static PI2O_MESSAGE_FRAME
507ASR_fillMessage(void *Message, u_int16_t size)
508{
509	PI2O_MESSAGE_FRAME Message_Ptr;
510
511	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
512	bzero(Message_Ptr, size);
513	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
514	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
515	  (size + sizeof(U32) - 1) >> 2);
516	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
517	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
518	return (Message_Ptr);
519} /* ASR_fillMessage */
520
521#define	EMPTY_QUEUE (0xffffffff)
522
523static __inline U32
524ASR_getMessage(Asr_softc_t *sc)
525{
526	U32	MessageOffset;
527
528	MessageOffset = asr_get_ToFIFO(sc);
529	if (MessageOffset == EMPTY_QUEUE)
530		MessageOffset = asr_get_ToFIFO(sc);
531
532	return (MessageOffset);
533} /* ASR_getMessage */
534
/* Issue a polled command */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	/* Poll up to 15s (1500 x 10ms) for a free inbound message frame. */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the message into the adapter-supplied frame. */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	/*
	 * Returns the previous interrupt mask (for the caller to restore),
	 * or 0xffffffff if no message frame could be obtained.
	 */
	return (Mask);
} /* ASR_initiateCp */
564
/*
 *	Reset the adapter.
 *
 *	Sends a polled I2O ExecIopReset and waits for the adapter to DMA
 *	a non-zero status word into host memory.  Returns that status on
 *	success, or 0 if the message could not be posted or no response
 *	arrived in time.
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	/* The adapter writes its reset status word to this physical address. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */
613
/*
 *	Get the current state of the adapter via a polled ExecStatusGet.
 *
 *	Returns a pointer to the reply in host status memory, or NULL if
 *	the message could not be posted or the adapter did not respond.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	/* The adapter DMAs its reply to this physical address. */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms
		 * (raised from 50ms for 2000S/2005S boards -- see the V1.08
		 * changelog entry above).
		 */
		u_int8_t Delay = 255;

		/* Poll SyncByte; the adapter sets it non-zero when done. */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
668
669/*
670 *	Check if the device is a SCSI I2O HBA, and add it to the list.
671 */
672
/*
 * Probe for an ASR controller.  If we find one, we will use it.
 */
677static int
678asr_probe(device_t dev)
679{
680	u_int32_t id;
681
682	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
683	if ((id == 0xA5011044) || (id == 0xA5111044)) {
684		device_set_desc(dev, "Adaptec Caching SCSI RAID");
685		return (BUS_PROBE_DEFAULT);
686	}
687	return (ENXIO);
688} /* asr_probe */
689
/* Allocate and minimally initialize a driver-private ccb. */
static __inline union asr_ccb *
asr_alloc_ccb(Asr_softc_t *sc)
{
	union asr_ccb *new_ccb;

	/* NOTE(review): with M_WAITOK, malloc(9) should never return NULL,
	 * so the check below looks redundant -- confirm before removing. */
	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;	/* back-pointer to our softc */
	}
	return (new_ccb);
} /* asr_alloc_ccb */
703
/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
709
/*
 *	Print inquiry data `carefully': at most `len' characters, stopping
 *	early at a NUL, space or '-'.
 */
static void
ASR_prstring(u_int8_t *s, int len)
{
	for (; len > 0; --len, ++s) {
		if (*s == '\0' || *s == ' ' || *s == '-')
			break;
		printf("%c", *s);
	}
} /* ASR_prstring */
720
/*
 *	Send a message synchronously and without Interrupt to a ccb.
 *
 *	Interrupts are masked and completion is detected by polling
 *	asr_intr().  Returns the final ccb_h.status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	int		s;
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* Could not post the message; mark the ccb for requeue. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
760
761/*
762 *	Send a message synchronously to an Asr_softc_t.
763 */
764static int
765ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
766{
767	union asr_ccb	*ccb;
768	int		status;
769
770	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
771		return (CAM_REQUEUE_REQ);
772	}
773
774	status = ASR_queue_s (ccb, Message);
775
776	asr_free_ccb(ccb);
777
778	return (status);
779} /* ASR_queue_c */
780
/*
 *	Add the specified ccb to the active queue
 *
 *	Also arms the per-command timeout (asr_timeout) unless the caller
 *	requested an infinite timeout.  Must run at splcam, handled here.
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
805
/*
 *	Remove the specified ccb from the active queue.
 *
 *	Disarms the timeout that ASR_ccbAdd() may have armed.
 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */
819
/*
 *	Fail all the active commands, so they get re-issued by the operating
 *	system.
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;
	int		s;

	s = splcam();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Mark for requeue by the upper layer. */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* No CAM path: presumably an internal command with
			 * a sleeper on the ccb -- wake it instead. */
			wakeup (ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
858
/*
 *	The following command causes the HBA to reset the specific bus
 *
 *	Finds the bus-port LCT entry matching `bus' and fires an
 *	asynchronous I2O_HBA_BUS_RESET at its TID.
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Walk the LCT looking for the port entry on the requested bus. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
887
888static __inline int
889ASR_getBlinkLedCode(Asr_softc_t *sc)
890{
891	U8	blink;
892
893	if (sc == NULL)
894		return (0);
895
896	blink = bus_space_read_1(sc->ha_frame_btag,
897				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
898	if (blink != 0xBC)
899		return (0);
900
901	blink = bus_space_read_1(sc->ha_frame_btag,
902				 sc->ha_frame_bhandle, sc->ha_blinkLED);
903	return (blink);
904} /* ASR_getBlinkCode */
905
/*
 *	Determine the address of a TID lookup. Must be done at high priority
 *	since the address can be changed by other threads of execution.
 *
 *	Returns a NULL pointer if not indexable (but will attempt to generate
 *	an index if the `new_entry' flag is set to TRUE).
 *
 *	All addressable entries are guaranteed zero if never initialized.
 */
915static tid_t *
916ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
917{
918	target2lun_t	*bus_ptr;
919	lun2tid_t	*target_ptr;
920	unsigned	new_size;
921
922	/*
923	 *	Validity checking of incoming parameters. More of a bound
924	 * expansion limit than an issue with the code dealing with the
925	 * values.
926	 *
927	 *	sc must be valid before it gets here, so that check could be
928	 * dropped if speed a critical issue.
929	 */
930	if ((sc == NULL)
931	 || (bus > MAX_CHANNEL)
932	 || (target > sc->ha_MaxId)
933	 || (lun > sc->ha_MaxLun)) {
934		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
935		  (u_long)sc, bus, target, lun);
936		return (NULL);
937	}
938	/*
939	 *	See if there is an associated bus list.
940	 *
941	 *	for performance, allocate in size of BUS_CHUNK chunks.
942	 *	BUS_CHUNK must be a power of two. This is to reduce
943	 *	fragmentation effects on the allocations.
944	 */
945#define BUS_CHUNK 8
946	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
947	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
948		/*
949		 *	Allocate a new structure?
950		 *		Since one element in structure, the +1
951		 *		needed for size has been abstracted.
952		 */
953		if ((new_entry == FALSE)
954		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
955		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
956		    M_TEMP, M_WAITOK | M_ZERO))
957		   == NULL)) {
958			debug_asr_printf("failed to allocate bus list\n");
959			return (NULL);
960		}
961		bus_ptr->size = new_size + 1;
962	} else if (bus_ptr->size <= new_size) {
963		target2lun_t * new_bus_ptr;
964
965		/*
966		 *	Reallocate a new structure?
967		 *		Since one element in structure, the +1
968		 *		needed for size has been abstracted.
969		 */
970		if ((new_entry == FALSE)
971		 || ((new_bus_ptr = (target2lun_t *)malloc (
972		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
973		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
974			debug_asr_printf("failed to reallocate bus list\n");
975			return (NULL);
976		}
977		/*
978		 *	Copy the whole thing, safer, simpler coding
979		 * and not really performance critical at this point.
980		 */
981		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
982		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
983		sc->ha_targets[bus] = new_bus_ptr;
984		free(bus_ptr, M_TEMP);
985		bus_ptr = new_bus_ptr;
986		bus_ptr->size = new_size + 1;
987	}
988	/*
989	 *	We now have the bus list, lets get to the target list.
990	 *	Since most systems have only *one* lun, we do not allocate
991	 *	in chunks as above, here we allow one, then in chunk sizes.
992	 *	TARGET_CHUNK must be a power of two. This is to reduce
993	 *	fragmentation effects on the allocations.
994	 */
995#define TARGET_CHUNK 8
996	if ((new_size = lun) != 0) {
997		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
998	}
999	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
1000		/*
1001		 *	Allocate a new structure?
1002		 *		Since one element in structure, the +1
1003		 *		needed for size has been abstracted.
1004		 */
1005		if ((new_entry == FALSE)
1006		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1007		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1008		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1009			debug_asr_printf("failed to allocate target list\n");
1010			return (NULL);
1011		}
1012		target_ptr->size = new_size + 1;
1013	} else if (target_ptr->size <= new_size) {
1014		lun2tid_t * new_target_ptr;
1015
1016		/*
1017		 *	Reallocate a new structure?
1018		 *		Since one element in structure, the +1
1019		 *		needed for size has been abstracted.
1020		 */
1021		if ((new_entry == FALSE)
1022		 || ((new_target_ptr = (lun2tid_t *)malloc (
1023		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1024		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1025			debug_asr_printf("failed to reallocate target list\n");
1026			return (NULL);
1027		}
1028		/*
1029		 *	Copy the whole thing, safer, simpler coding
1030		 * and not really performance critical at this point.
1031		 */
1032		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1033		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1034		bus_ptr->LUN[target] = new_target_ptr;
1035		free(target_ptr, M_TEMP);
1036		target_ptr = new_target_ptr;
1037		target_ptr->size = new_size + 1;
1038	}
1039	/*
1040	 *	Now, acquire the TID address from the LUN indexed list.
1041	 */
1042	return (&(target_ptr->TID[lun]));
1043} /* ASR_getTidAddress */
1044
1045/*
1046 *	Get a pre-existing TID relationship.
1047 *
1048 *	If the TID was never set, return (tid_t)-1.
1049 *
1050 *	should use mutex rather than spl.
1051 */
1052static __inline tid_t
1053ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1054{
1055	tid_t	*tid_ptr;
1056	int	s;
1057	tid_t	retval;
1058
1059	s = splcam();
1060	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1061	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1062	 || (*tid_ptr == (tid_t)0)) {
1063		splx(s);
1064		return ((tid_t)-1);
1065	}
1066	retval = *tid_ptr;
1067	splx(s);
1068	return (retval);
1069} /* ASR_getTid */
1070
1071/*
1072 *	Set a TID relationship.
1073 *
1074 *	If the TID was not set, return (tid_t)-1.
1075 *
1076 *	should use mutex rather than spl.
1077 */
1078static __inline tid_t
1079ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1080{
1081	tid_t	*tid_ptr;
1082	int	s;
1083
1084	if (TID != (tid_t)-1) {
1085		if (TID == 0) {
1086			return ((tid_t)-1);
1087		}
1088		s = splcam();
1089		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1090		 == NULL) {
1091			splx(s);
1092			return ((tid_t)-1);
1093		}
1094		*tid_ptr = TID;
1095		splx(s);
1096	}
1097	return (TID);
1098} /* ASR_setTid */
1099
1100/*-------------------------------------------------------------------------*/
1101/*		      Function ASR_rescan				   */
1102/*-------------------------------------------------------------------------*/
1103/* The Parameters Passed To This Function Are :				   */
1104/*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1105/*									   */
1106/* This Function Will rescan the adapter and resynchronize any data	   */
1107/*									   */
1108/* Return : 0 For OK, Error Code Otherwise				   */
1109/*-------------------------------------------------------------------------*/
1110
1111static int
1112ASR_rescan(Asr_softc_t *sc)
1113{
1114	int bus;
1115	int error;
1116
1117	/*
1118	 * Re-acquire the LCT table and synchronize us to the adapter.
1119	 */
1120	if ((error = ASR_acquireLct(sc)) == 0) {
1121		error = ASR_acquireHrt(sc);
1122	}
1123
1124	if (error != 0) {
1125		return error;
1126	}
1127
1128	bus = sc->ha_MaxBus;
1129	/* Reset all existing cached TID lookups */
1130	do {
1131		int target, event = 0;
1132
1133		/*
1134		 *	Scan for all targets on this bus to see if they
1135		 * got affected by the rescan.
1136		 */
1137		for (target = 0; target <= sc->ha_MaxId; ++target) {
1138			int lun;
1139
1140			/* Stay away from the controller ID */
1141			if (target == sc->ha_adapter_target[bus]) {
1142				continue;
1143			}
1144			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1145				PI2O_LCT_ENTRY Device;
1146				tid_t	       TID = (tid_t)-1;
1147				tid_t	       LastTID;
1148
1149				/*
1150				 * See if the cached TID changed. Search for
1151				 * the device in our new LCT.
1152				 */
1153				for (Device = sc->ha_LCT->LCTEntry;
1154				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1155				   + I2O_LCT_getTableSize(sc->ha_LCT));
1156				  ++Device) {
1157					if ((Device->le_type != I2O_UNKNOWN)
1158					 && (Device->le_bus == bus)
1159					 && (Device->le_target == target)
1160					 && (Device->le_lun == lun)
1161					 && (I2O_LCT_ENTRY_getUserTID(Device)
1162					  == 0xFFF)) {
1163						TID = I2O_LCT_ENTRY_getLocalTID(
1164						  Device);
1165						break;
1166					}
1167				}
1168				/*
1169				 * Indicate to the OS that the label needs
1170				 * to be recalculated, or that the specific
1171				 * open device is no longer valid (Merde)
1172				 * because the cached TID changed.
1173				 */
1174				LastTID = ASR_getTid (sc, bus, target, lun);
1175				if (LastTID != TID) {
1176					struct cam_path * path;
1177
1178					if (xpt_create_path(&path,
1179					  /*periph*/NULL,
1180					  cam_sim_path(sc->ha_sim[bus]),
1181					  target, lun) != CAM_REQ_CMP) {
1182						if (TID == (tid_t)-1) {
1183							event |= AC_LOST_DEVICE;
1184						} else {
1185							event |= AC_INQ_CHANGED
1186							       | AC_GETDEV_CHANGED;
1187						}
1188					} else {
1189						if (TID == (tid_t)-1) {
1190							xpt_async(
1191							  AC_LOST_DEVICE,
1192							  path, NULL);
1193						} else if (LastTID == (tid_t)-1) {
1194							struct ccb_getdev ccb;
1195
1196							xpt_setup_ccb(
1197							  &(ccb.ccb_h),
1198							  path, /*priority*/5);
1199							xpt_async(
1200							  AC_FOUND_DEVICE,
1201							  path,
1202							  &ccb);
1203						} else {
1204							xpt_async(
1205							  AC_INQ_CHANGED,
1206							  path, NULL);
1207							xpt_async(
1208							  AC_GETDEV_CHANGED,
1209							  path, NULL);
1210						}
1211					}
1212				}
1213				/*
1214				 *	We have the option of clearing the
1215				 * cached TID for it to be rescanned, or to
1216				 * set it now even if the device never got
1217				 * accessed. We chose the later since we
1218				 * currently do not use the condition that
1219				 * the TID ever got cached.
1220				 */
1221				ASR_setTid (sc, bus, target, lun, TID);
1222			}
1223		}
1224		/*
1225		 *	The xpt layer can not handle multiple events at the
1226		 * same call.
1227		 */
1228		if (event & AC_LOST_DEVICE) {
1229			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1230		}
1231		if (event & AC_INQ_CHANGED) {
1232			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1233		}
1234		if (event & AC_GETDEV_CHANGED) {
1235			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1236		}
1237	} while (--bus >= 0);
1238	return (error);
1239} /* ASR_rescan */
1240
1241/*-------------------------------------------------------------------------*/
1242/*		      Function ASR_reset				   */
1243/*-------------------------------------------------------------------------*/
1244/* The Parameters Passed To This Function Are :				   */
1245/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1246/*									   */
1247/* This Function Will reset the adapter and resynchronize any data	   */
1248/*									   */
1249/* Return : None							   */
1250/*-------------------------------------------------------------------------*/
1251
1252static int
1253ASR_reset(Asr_softc_t *sc)
1254{
1255	int s, retVal;
1256
1257	s = splcam();
1258	if ((sc->ha_in_reset == HA_IN_RESET)
1259	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1260		splx (s);
1261		return (EBUSY);
1262	}
1263	/*
1264	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1265	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1266	 */
1267	++(sc->ha_in_reset);
1268	if (ASR_resetIOP(sc) == 0) {
1269		debug_asr_printf ("ASR_resetIOP failed\n");
1270		/*
1271		 *	We really need to take this card off-line, easier said
1272		 * than make sense. Better to keep retrying for now since if a
1273		 * UART cable is connected the blinkLEDs the adapter is now in
1274		 * a hard state requiring action from the monitor commands to
1275		 * the HBA to continue. For debugging waiting forever is a
1276		 * good thing. In a production system, however, one may wish
1277		 * to instead take the card off-line ...
1278		 */
1279		/* Wait Forever */
1280		while (ASR_resetIOP(sc) == 0);
1281	}
1282	retVal = ASR_init (sc);
1283	splx (s);
1284	if (retVal != 0) {
1285		debug_asr_printf ("ASR_init failed\n");
1286		sc->ha_in_reset = HA_OFF_LINE;
1287		return (ENXIO);
1288	}
1289	if (ASR_rescan (sc) != 0) {
1290		debug_asr_printf ("ASR_rescan failed\n");
1291	}
1292	ASR_failActiveCommands (sc);
1293	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1294		printf ("asr%d: Brining adapter back on-line\n",
1295		  sc->ha_path[0]
1296		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1297		    : 0);
1298	}
1299	sc->ha_in_reset = HA_OPERATIONAL;
1300	return (0);
1301} /* ASR_reset */
1302
1303/*
1304 *	Device timeout handler.
1305 */
1306static void
1307asr_timeout(void *arg)
1308{
1309	union asr_ccb	*ccb = (union asr_ccb *)arg;
1310	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1311	int		s;
1312
1313	debug_asr_print_path(ccb);
1314	debug_asr_printf("timed out");
1315
1316	/*
1317	 *	Check if the adapter has locked up?
1318	 */
1319	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1320		/* Reset Adapter */
1321		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1322		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1323		if (ASR_reset (sc) == ENXIO) {
1324			/* Try again later */
1325			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1326			  (caddr_t)ccb,
1327			  (ccb->ccb_h.timeout * hz) / 1000);
1328		}
1329		return;
1330	}
1331	/*
1332	 *	Abort does not function on the ASR card!!! Walking away from
1333	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1334	 * our best bet, followed by a complete adapter reset if that fails.
1335	 */
1336	s = splcam();
1337	/* Check if we already timed out once to raise the issue */
1338	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1339		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1340		if (ASR_reset (sc) == ENXIO) {
1341			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1342			  (caddr_t)ccb,
1343			  (ccb->ccb_h.timeout * hz) / 1000);
1344		}
1345		splx(s);
1346		return;
1347	}
1348	debug_asr_printf ("\nresetting bus\n");
1349	/* If the BUS reset does not take, then an adapter reset is next! */
1350	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1351	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1352	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
1353	  (ccb->ccb_h.timeout * hz) / 1000);
1354	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1355	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1356	splx(s);
1357} /* asr_timeout */
1358
1359/*
1360 * send a message asynchronously
1361 */
1362static int
1363ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1364{
1365	U32		MessageOffset;
1366	union asr_ccb	*ccb;
1367
1368	debug_asr_printf("Host Command Dump:\n");
1369	debug_asr_dump_message(Message);
1370
1371	ccb = (union asr_ccb *)(long)
1372	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1373
1374	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1375		asr_set_frame(sc, Message, MessageOffset,
1376			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1377		if (ccb) {
1378			ASR_ccbAdd (sc, ccb);
1379		}
1380		/* Post the command */
1381		asr_set_ToFIFO(sc, MessageOffset);
1382	} else {
1383		if (ASR_getBlinkLedCode(sc)) {
1384			/*
1385			 *	Unlikely we can do anything if we can't grab a
1386			 * message frame :-(, but lets give it a try.
1387			 */
1388			(void)ASR_reset(sc);
1389		}
1390	}
1391	return (MessageOffset);
1392} /* ASR_queue */
1393
1394
/*
 * Build one simple scatter/gather element: set the byte count, the
 * flags (always tagged as a SIMPLE address element, OR'd with the
 * caller's Flags) and the physical address of Buffer (0 when Buffer
 * is NULL).
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and stays safe inside unbraced if/else bodies
 * (CERT PRE10-C).  All call sites already supply the trailing ';'.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
do {								   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer));		   \
} while (0)
1406
1407/*
1408 *	Retrieve Parameter Group.
1409 */
1410static void *
1411ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1412	      unsigned BufferSize)
1413{
1414	struct paramGetMessage {
1415		I2O_UTIL_PARAMS_GET_MESSAGE M;
1416		char
1417		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1418		struct Operations {
1419			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1420			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1421		}			     O;
1422	}				Message;
1423	struct Operations		*Operations_Ptr;
1424	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
1425	struct ParamBuffer {
1426		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1427		I2O_PARAM_READ_OPERATION_RESULT	    Read;
1428		char				    Info[1];
1429	}				*Buffer_Ptr;
1430
1431	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1432	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1433	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1434	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1435	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1436	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1437	bzero(Operations_Ptr, sizeof(struct Operations));
1438	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1439	  &(Operations_Ptr->Header), 1);
1440	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1441	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1442	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1443	  &(Operations_Ptr->Template[0]), 0xFFFF);
1444	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1445	  &(Operations_Ptr->Template[0]), Group);
1446	Buffer_Ptr = (struct ParamBuffer *)Buffer;
1447	bzero(Buffer_Ptr, BufferSize);
1448
1449	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1450	  I2O_VERSION_11
1451	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1452	    / sizeof(U32)) << 4));
1453	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1454	  TID);
1455	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1456	  I2O_UTIL_PARAMS_GET);
1457	/*
1458	 *  Set up the buffers as scatter gather elements.
1459	 */
1460	SG(&(Message_Ptr->SGL), 0,
1461	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1462	  Operations_Ptr, sizeof(struct Operations));
1463	SG(&(Message_Ptr->SGL), 1,
1464	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1465	  Buffer_Ptr, BufferSize);
1466
1467	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1468	 && (Buffer_Ptr->Header.ResultCount)) {
1469		return ((void *)(Buffer_Ptr->Info));
1470	}
1471	return (NULL);
1472} /* ASR_getParams */
1473
1474/*
1475 *	Acquire the LCT information.
1476 */
1477static int
1478ASR_acquireLct(Asr_softc_t *sc)
1479{
1480	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
1481	PI2O_SGE_SIMPLE_ELEMENT		sg;
1482	int				MessageSizeInBytes;
1483	caddr_t				v;
1484	int				len;
1485	I2O_LCT				Table;
1486	PI2O_LCT_ENTRY			Entry;
1487
1488	/*
1489	 *	sc value assumed valid
1490	 */
1491	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1492	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1493	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
1494	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1495		return (ENOMEM);
1496	}
1497	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1498	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1499	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1500	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1501	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1502	    I2O_EXEC_LCT_NOTIFY);
1503	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1504	    I2O_CLASS_MATCH_ANYCLASS);
1505	/*
1506	 *	Call the LCT table to determine the number of device entries
1507	 * to reserve space for.
1508	 */
1509	SG(&(Message_Ptr->SGL), 0,
1510	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1511	  sizeof(I2O_LCT));
1512	/*
1513	 *	since this code is reused in several systems, code efficiency
1514	 * is greater by using a shift operation rather than a divide by
1515	 * sizeof(u_int32_t).
1516	 */
1517	I2O_LCT_setTableSize(&Table,
1518	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1519	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1520	/*
1521	 *	Determine the size of the LCT table.
1522	 */
1523	if (sc->ha_LCT) {
1524		free(sc->ha_LCT, M_TEMP);
1525	}
1526	/*
1527	 *	malloc only generates contiguous memory when less than a
1528	 * page is expected. We must break the request up into an SG list ...
1529	 */
1530	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1531	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1532	 || (len > (128 * 1024))) {	/* Arbitrary */
1533		free(Message_Ptr, M_TEMP);
1534		return (EINVAL);
1535	}
1536	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
1537		free(Message_Ptr, M_TEMP);
1538		return (ENOMEM);
1539	}
1540	/*
1541	 *	since this code is reused in several systems, code efficiency
1542	 * is greater by using a shift operation rather than a divide by
1543	 * sizeof(u_int32_t).
1544	 */
1545	I2O_LCT_setTableSize(sc->ha_LCT,
1546	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1547	/*
1548	 *	Convert the access to the LCT table into a SG list.
1549	 */
1550	sg = Message_Ptr->SGL.u.Simple;
1551	v = (caddr_t)(sc->ha_LCT);
1552	for (;;) {
1553		int next, base, span;
1554
1555		span = 0;
1556		next = base = KVTOPHYS(v);
1557		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1558
1559		/* How far can we go contiguously */
1560		while ((len > 0) && (base == next)) {
1561			int size;
1562
1563			next = trunc_page(base) + PAGE_SIZE;
1564			size = next - base;
1565			if (size > len) {
1566				size = len;
1567			}
1568			span += size;
1569			v += size;
1570			len -= size;
1571			base = KVTOPHYS(v);
1572		}
1573
1574		/* Construct the Flags */
1575		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1576		{
1577			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1578			if (len <= 0) {
1579				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1580				    | I2O_SGL_FLAGS_LAST_ELEMENT
1581				    | I2O_SGL_FLAGS_END_OF_BUFFER);
1582			}
1583			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1584		}
1585
1586		if (len <= 0) {
1587			break;
1588		}
1589
1590		/*
1591		 * Incrementing requires resizing of the packet.
1592		 */
1593		++sg;
1594		MessageSizeInBytes += sizeof(*sg);
1595		I2O_MESSAGE_FRAME_setMessageSize(
1596		  &(Message_Ptr->StdMessageFrame),
1597		  I2O_MESSAGE_FRAME_getMessageSize(
1598		    &(Message_Ptr->StdMessageFrame))
1599		  + (sizeof(*sg) / sizeof(U32)));
1600		{
1601			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1602
1603			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1604			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1605			    == NULL) {
1606				free(sc->ha_LCT, M_TEMP);
1607				sc->ha_LCT = NULL;
1608				free(Message_Ptr, M_TEMP);
1609				return (ENOMEM);
1610			}
1611			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1612			bcopy(Message_Ptr, NewMessage_Ptr, span);
1613			free(Message_Ptr, M_TEMP);
1614			sg = (PI2O_SGE_SIMPLE_ELEMENT)
1615			  (((caddr_t)NewMessage_Ptr) + span);
1616			Message_Ptr = NewMessage_Ptr;
1617		}
1618	}
1619	{	int retval;
1620
1621		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1622		free(Message_Ptr, M_TEMP);
1623		if (retval != CAM_REQ_CMP) {
1624			return (ENODEV);
1625		}
1626	}
1627	/* If the LCT table grew, lets truncate accesses */
1628	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1629		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1630	}
1631	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1632	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1633	  ++Entry) {
1634		Entry->le_type = I2O_UNKNOWN;
1635		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1636
1637		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1638			Entry->le_type = I2O_BSA;
1639			break;
1640
1641		case I2O_CLASS_SCSI_PERIPHERAL:
1642			Entry->le_type = I2O_SCSI;
1643			break;
1644
1645		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1646			Entry->le_type = I2O_FCA;
1647			break;
1648
1649		case I2O_CLASS_BUS_ADAPTER_PORT:
1650			Entry->le_type = I2O_PORT | I2O_SCSI;
1651			/* FALLTHRU */
1652		case I2O_CLASS_FIBRE_CHANNEL_PORT:
1653			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1654			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
1655				Entry->le_type = I2O_PORT | I2O_FCA;
1656			}
1657		{	struct ControllerInfo {
1658				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1659				I2O_PARAM_READ_OPERATION_RESULT	    Read;
1660				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1661			} Buffer;
1662			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1663
1664			Entry->le_bus = 0xff;
1665			Entry->le_target = 0xff;
1666			Entry->le_lun = 0xff;
1667
1668			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1669			  ASR_getParams(sc,
1670			    I2O_LCT_ENTRY_getLocalTID(Entry),
1671			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1672			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1673				continue;
1674			}
1675			Entry->le_target
1676			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1677			    Info);
1678			Entry->le_lun = 0;
1679		}	/* FALLTHRU */
1680		default:
1681			continue;
1682		}
1683		{	struct DeviceInfo {
1684				I2O_PARAM_RESULTS_LIST_HEADER	Header;
1685				I2O_PARAM_READ_OPERATION_RESULT Read;
1686				I2O_DPT_DEVICE_INFO_SCALAR	Info;
1687			} Buffer;
1688			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;
1689
1690			Entry->le_bus = 0xff;
1691			Entry->le_target = 0xff;
1692			Entry->le_lun = 0xff;
1693
1694			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1695			  ASR_getParams(sc,
1696			    I2O_LCT_ENTRY_getLocalTID(Entry),
1697			    I2O_DPT_DEVICE_INFO_GROUP_NO,
1698			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1699				continue;
1700			}
1701			Entry->le_type
1702			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1703			Entry->le_bus
1704			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1705			if ((Entry->le_bus > sc->ha_MaxBus)
1706			 && (Entry->le_bus <= MAX_CHANNEL)) {
1707				sc->ha_MaxBus = Entry->le_bus;
1708			}
1709			Entry->le_target
1710			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1711			Entry->le_lun
1712			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1713		}
1714	}
1715	/*
1716	 *	A zero return value indicates success.
1717	 */
1718	return (0);
1719} /* ASR_acquireLct */
1720
1721/*
1722 * Initialize a message frame.
1723 * We assume that the CDB has already been set up, so all we do here is
1724 * generate the Scatter Gather list.
1725 */
1726static PI2O_MESSAGE_FRAME
1727ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
1728{
1729	PI2O_MESSAGE_FRAME	Message_Ptr;
1730	PI2O_SGE_SIMPLE_ELEMENT sg;
1731	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1732	vm_size_t		size, len;
1733	caddr_t			v;
1734	U32			MessageSize;
1735	int			next, span, base, rw;
1736	int			target = ccb->ccb_h.target_id;
1737	int			lun = ccb->ccb_h.target_lun;
1738	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1739	tid_t			TID;
1740
1741	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1742	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1743	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1744	      sizeof(I2O_SG_ELEMENT)));
1745
1746	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1747		PI2O_LCT_ENTRY Device;
1748
1749		TID = 0;
1750		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1751		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1752		    ++Device) {
1753			if ((Device->le_type != I2O_UNKNOWN)
1754			 && (Device->le_bus == bus)
1755			 && (Device->le_target == target)
1756			 && (Device->le_lun == lun)
1757			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1758				TID = I2O_LCT_ENTRY_getLocalTID(Device);
1759				ASR_setTid(sc, Device->le_bus,
1760					   Device->le_target, Device->le_lun,
1761					   TID);
1762				break;
1763			}
1764		}
1765	}
1766	if (TID == (tid_t)0) {
1767		return (NULL);
1768	}
1769	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1770	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1771	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1772	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1773	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1774		/ sizeof(U32)) << 4));
1775	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1776	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1777	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1778	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1779	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1780	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1781	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1782	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1783	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1784	    I2O_SCB_FLAG_ENABLE_DISCONNECT
1785	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1786	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1787	/*
1788	 * We do not need any (optional byteswapping) method access to
1789	 * the Initiator & Transaction context field.
1790	 */
1791	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1792
1793	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1794	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1795	/*
1796	 * copy the cdb over
1797	 */
1798	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1799	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1800	bcopy(&(ccb->csio.cdb_io),
1801	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1802	    ccb->csio.cdb_len);
1803
1804	/*
1805	 * Given a buffer describing a transfer, set up a scatter/gather map
1806	 * in a ccb to map that SCSI transfer.
1807	 */
1808
1809	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1810
1811	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1812	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1813	  (ccb->csio.dxfer_len)
1814	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1815		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1816		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1817		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1818		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1819		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1820		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1821		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1822	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
1823		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1824		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1825
1826	/*
1827	 * Given a transfer described by a `data', fill in the SG list.
1828	 */
1829	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1830
1831	len = ccb->csio.dxfer_len;
1832	v = ccb->csio.data_ptr;
1833	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1834	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1835	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1836	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1837	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1838	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1839		span = 0;
1840		next = base = KVTOPHYS(v);
1841		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1842
1843		/* How far can we go contiguously */
1844		while ((len > 0) && (base == next)) {
1845			next = trunc_page(base) + PAGE_SIZE;
1846			size = next - base;
1847			if (size > len) {
1848				size = len;
1849			}
1850			span += size;
1851			v += size;
1852			len -= size;
1853			base = KVTOPHYS(v);
1854		}
1855
1856		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1857		if (len == 0) {
1858			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1859		}
1860		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1861		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1862		++sg;
1863		MessageSize += sizeof(*sg) / sizeof(U32);
1864	}
1865	/* We always do the request sense ... */
1866	if ((span = ccb->csio.sense_len) == 0) {
1867		span = sizeof(ccb->csio.sense_data);
1868	}
1869	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1870	  &(ccb->csio.sense_data), span);
1871	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1872	  MessageSize + (sizeof(*sg) / sizeof(U32)));
1873	return (Message_Ptr);
1874} /* ASR_init_message */
1875
1876/*
1877 *	Reset the adapter.
1878 */
1879static U32
1880ASR_initOutBound(Asr_softc_t *sc)
1881{
1882	struct initOutBoundMessage {
1883		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1884		U32			       R;
1885	}				Message;
1886	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
1887	U32				*volatile Reply_Ptr;
1888	U32				Old;
1889
1890	/*
1891	 *  Build up our copy of the Message.
1892	 */
1893	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1894	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1895	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1896	  I2O_EXEC_OUTBOUND_INIT);
1897	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1898	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1899	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1900	/*
1901	 *  Reset the Reply Status
1902	 */
1903	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
1904	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1905	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1906	  sizeof(U32));
1907	/*
1908	 *	Send the Message out
1909	 */
1910	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1911	    0xffffffff) {
1912		u_long size, addr;
1913
1914		/*
1915		 *	Wait for a response (Poll).
1916		 */
1917		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1918		/*
1919		 *	Re-enable the interrupts.
1920		 */
1921		asr_set_intr(sc, Old);
1922		/*
1923		 *	Populate the outbound table.
1924		 */
1925		if (sc->ha_Msgs == NULL) {
1926
1927			/* Allocate the reply frames */
1928			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1929			  * sc->ha_Msgs_Count;
1930
1931			/*
1932			 *	contigmalloc only works reliably at
1933			 * initialization time.
1934			 */
1935			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1936			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1937			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1938				bzero(sc->ha_Msgs, size);
1939				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1940			}
1941		}
1942
1943		/* Initialize the outbound FIFO */
1944		if (sc->ha_Msgs != NULL)
1945		for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1946		    size; --size) {
1947			asr_set_FromFIFO(sc, addr);
1948			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1949		}
1950		return (*Reply_Ptr);
1951	}
1952	return (0);
1953} /* ASR_initOutBound */
1954
1955/*
1956 *	Set the system table
1957 */
1958static int
1959ASR_setSysTab(Asr_softc_t *sc)
1960{
1961	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1962	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1963	Asr_softc_t		    * ha, *next;
1964	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1965	int			      retVal;
1966
1967	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
1968	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1969		return (ENOMEM);
1970	}
1971	STAILQ_FOREACH(ha, &Asr_softc_list, ha_next) {
1972		++SystemTable->NumberEntries;
1973	}
1974	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
1975	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1976	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1977	  M_TEMP, M_WAITOK)) == NULL) {
1978		free(SystemTable, M_TEMP);
1979		return (ENOMEM);
1980	}
1981	(void)ASR_fillMessage((void *)Message_Ptr,
1982	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1983	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1984	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1985	  (I2O_VERSION_11 +
1986	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1987			/ sizeof(U32)) << 4)));
1988	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1989	  I2O_EXEC_SYS_TAB_SET);
1990	/*
1991	 *	Call the LCT table to determine the number of device entries
1992	 * to reserve space for.
1993	 *	since this code is reused in several systems, code efficiency
1994	 * is greater by using a shift operation rather than a divide by
1995	 * sizeof(u_int32_t).
1996	 */
1997	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1998	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1999	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2000	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2001	++sg;
2002	STAILQ_FOREACH_SAFE(ha, &Asr_softc_list, ha_next, next) {
2003		SG(sg, 0,
2004		  ((next)
2005		    ? (I2O_SGL_FLAGS_DIR)
2006		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2007		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2008		++sg;
2009	}
2010	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2011	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2012	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2013	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2014	free(Message_Ptr, M_TEMP);
2015	free(SystemTable, M_TEMP);
2016	return (retVal);
2017} /* ASR_setSysTab */
2018
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use each
 * entry's adapter ID to stamp a bus number onto the matching LCT device
 * entries, growing sc->ha_MaxBus as new busses appear.
 * Returns 0 on success or ENODEV if the ExecHrtGet request fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt;	/* on-stack reply buffer */
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp the entry count to what our stack buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Walk the cached LCT, matching on the low 12 ID bits. */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number is in the adapter ID high word. */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2076
2077/*
2078 *	Enable the adapter.
2079 */
2080static int
2081ASR_enableSys(Asr_softc_t *sc)
2082{
2083	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2084	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2085
2086	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2087	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2088	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2089	  I2O_EXEC_SYS_ENABLE);
2090	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2091} /* ASR_enableSys */
2092
2093/*
2094 *	Perform the stages necessary to initialize the adapter
2095 */
2096static int
2097ASR_init(Asr_softc_t *sc)
2098{
2099	return ((ASR_initOutBound(sc) == 0)
2100	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2101	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2102} /* ASR_init */
2103
2104/*
2105 *	Send a Synchronize Cache command to the target device.
2106 */
2107static void
2108ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2109{
2110	tid_t TID;
2111
2112	/*
2113	 * We will not synchronize the device when there are outstanding
2114	 * commands issued by the OS (this is due to a locked up device,
2115	 * as the OS normally would flush all outstanding commands before
2116	 * issuing a shutdown or an adapter reset).
2117	 */
2118	if ((sc != NULL)
2119	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2120	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2121	 && (TID != (tid_t)0)) {
2122		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2123		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2124
2125		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2126		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2127		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2128
2129		I2O_MESSAGE_FRAME_setVersionOffset(
2130		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2131		  I2O_VERSION_11
2132		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2133		    - sizeof(I2O_SG_ELEMENT))
2134			/ sizeof(U32)) << 4));
2135		I2O_MESSAGE_FRAME_setMessageSize(
2136		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2137		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2138		  - sizeof(I2O_SG_ELEMENT))
2139			/ sizeof(U32));
2140		I2O_MESSAGE_FRAME_setInitiatorAddress (
2141		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2142		I2O_MESSAGE_FRAME_setFunction(
2143		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2144		I2O_MESSAGE_FRAME_setTargetAddress(
2145		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2146		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2147		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2148		  I2O_SCSI_SCB_EXEC);
2149		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2150		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2151		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2152		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2153		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2154		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2155		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2156		  DPT_ORGANIZATION_ID);
2157		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2158		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2159		Message_Ptr->CDB[1] = (lun << 5);
2160
2161		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2162		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2163		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2164		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2165		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2166
2167		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2168
2169	}
2170}
2171
2172static void
2173ASR_synchronize(Asr_softc_t *sc)
2174{
2175	int bus, target, lun;
2176
2177	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2178		for (target = 0; target <= sc->ha_MaxId; ++target) {
2179			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2180				ASR_sync(sc,bus,target,lun);
2181			}
2182		}
2183	}
2184}
2185
2186/*
2187 *	Reset the HBA, targets and BUS.
2188 *		Currently this resets *all* the SCSI busses.
2189 */
2190static __inline void
2191asr_hbareset(Asr_softc_t *sc)
2192{
2193	ASR_synchronize(sc);
2194	(void)ASR_reset(sc);
2195} /* asr_hbareset */
2196
2197/*
2198 *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2199 * limit and a reduction in error checking (in the pre 4.0 case).
2200 */
2201static int
2202asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2203{
2204	int		rid;
2205	u_int32_t	p, l, s;
2206
2207	/*
2208	 * I2O specification says we must find first *memory* mapped BAR
2209	 */
2210	for (rid = 0; rid < 4; rid++) {
2211		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2212		if ((p & 1) == 0) {
2213			break;
2214		}
2215	}
2216	/*
2217	 *	Give up?
2218	 */
2219	if (rid >= 4) {
2220		rid = 0;
2221	}
2222	rid = PCIR_BAR(rid);
2223	p = pci_read_config(dev, rid, sizeof(p));
2224	pci_write_config(dev, rid, -1, sizeof(p));
2225	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2226	pci_write_config(dev, rid, p, sizeof(p));
2227	if (l > MAX_MAP) {
2228		l = MAX_MAP;
2229	}
2230	/*
2231	 * The 2005S Zero Channel RAID solution is not a perfect PCI
2232	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2233	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2234	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2235	 * accessible via BAR0, the messaging registers are accessible
2236	 * via BAR1. If the subdevice code is 50 to 59 decimal.
2237	 */
2238	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2239	if (s != 0xA5111044) {
2240		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2241		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2242		 && (ADPTDOMINATOR_SUB_ID_START <= s)
2243		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2244			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2245		}
2246	}
2247	p &= ~15;
2248	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2249	  p, p + l, l, RF_ACTIVE);
2250	if (sc->ha_mem_res == NULL) {
2251		return (0);
2252	}
2253	sc->ha_Base = rman_get_start(sc->ha_mem_res);
2254	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2255	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2256
2257	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2258		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2259			return (0);
2260		}
2261		p = pci_read_config(dev, rid, sizeof(p));
2262		pci_write_config(dev, rid, -1, sizeof(p));
2263		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2264		pci_write_config(dev, rid, p, sizeof(p));
2265		if (l > MAX_MAP) {
2266			l = MAX_MAP;
2267		}
2268		p &= ~15;
2269		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2270		  p, p + l, l, RF_ACTIVE);
2271		if (sc->ha_mes_res == NULL) {
2272			return (0);
2273		}
2274		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2275		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2276	} else {
2277		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2278		sc->ha_frame_btag = sc->ha_i2o_btag;
2279	}
2280	return (1);
2281} /* asr_pci_map_mem */
2282
2283/*
2284 *	A simplified copy of the real pci_map_int with additional
2285 * registration requirements.
2286 */
2287static int
2288asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2289{
2290	int rid = 0;
2291
2292	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2293	  RF_ACTIVE | RF_SHAREABLE);
2294	if (sc->ha_irq_res == NULL) {
2295		return (0);
2296	}
2297	if (bus_setup_intr(dev, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
2298	  NULL, (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2299		return (0);
2300	}
2301	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2302	return (1);
2303} /* asr_pci_map_int */
2304
2305static void
2306asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2307{
2308	Asr_softc_t *sc;
2309
2310	if (error)
2311		return;
2312
2313	sc = (Asr_softc_t *)arg;
2314
2315	/* XXX
2316	 * The status word can be at a 64-bit address, but the existing
2317	 * accessor macros simply cannot manipulate 64-bit addresses.
2318	 */
2319	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2320	    offsetof(struct Asr_status_mem, status);
2321	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2322	    offsetof(struct Asr_status_mem, rstatus);
2323}
2324
2325static int
2326asr_alloc_dma(Asr_softc_t *sc)
2327{
2328	device_t dev;
2329
2330	dev = sc->ha_dev;
2331
2332	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
2333			       1, 0,			/* algnmnt, boundary */
2334			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2335			       BUS_SPACE_MAXADDR,	/* highaddr */
2336			       NULL, NULL,		/* filter, filterarg */
2337			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2338			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2339			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2340			       0,			/* flags */
2341			       NULL, NULL,		/* lockfunc, lockarg */
2342			       &sc->ha_parent_dmat)) {
2343		device_printf(dev, "Cannot allocate parent DMA tag\n");
2344		return (ENOMEM);
2345	}
2346
2347	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2348			       1, 0,			/* algnmnt, boundary */
2349			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2350			       BUS_SPACE_MAXADDR,	/* highaddr */
2351			       NULL, NULL,		/* filter, filterarg */
2352			       sizeof(sc->ha_statusmem),/* maxsize */
2353			       1,			/* nsegments */
2354			       sizeof(sc->ha_statusmem),/* maxsegsize */
2355			       0,			/* flags */
2356			       NULL, NULL,		/* lockfunc, lockarg */
2357			       &sc->ha_statusmem_dmat)) {
2358		device_printf(dev, "Cannot allocate status DMA tag\n");
2359		bus_dma_tag_destroy(sc->ha_parent_dmat);
2360		return (ENOMEM);
2361	}
2362
2363	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2364	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2365		device_printf(dev, "Cannot allocate status memory\n");
2366		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2367		bus_dma_tag_destroy(sc->ha_parent_dmat);
2368		return (ENOMEM);
2369	}
2370	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2371	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2372
2373	return (0);
2374}
2375
/*
 *	Tear down the DMA state created by asr_alloc_dma().  Each step is
 * guarded so this is safe to call after any partial allocation.
 */
static void
asr_release_dma(Asr_softc_t *sc)
{

	/* A non-zero rstatus bus address implies the map was loaded. */
	if (sc->ha_rstatus_phys != 0)
		bus_dmamap_unload(sc->ha_statusmem_dmat,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem != NULL)
		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
	if (sc->ha_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_parent_dmat);
}
2391
2392/*
2393 *	Attach the devices, and virtual devices to the driver list.
2394 */
2395static int
2396asr_attach(device_t dev)
2397{
2398	PI2O_EXEC_STATUS_GET_REPLY status;
2399	PI2O_LCT_ENTRY		 Device;
2400	Asr_softc_t		 *sc;
2401	struct scsi_inquiry_data *iq;
2402	int			 bus, size, unit;
2403	int			 error;
2404
2405	sc = device_get_softc(dev);
2406	unit = device_get_unit(dev);
2407	sc->ha_dev = dev;
2408
2409	if (STAILQ_EMPTY(&Asr_softc_list)) {
2410		/*
2411		 *	Fixup the OS revision as saved in the dptsig for the
2412		 *	engine (dptioctl.h) to pick up.
2413		 */
2414		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2415	}
2416	/*
2417	 *	Initialize the software structure
2418	 */
2419	LIST_INIT(&(sc->ha_ccb));
2420	/* Link us into the HA list */
2421	STAILQ_INSERT_TAIL(&Asr_softc_list, sc, ha_next);
2422
2423	/*
2424	 *	This is the real McCoy!
2425	 */
2426	if (!asr_pci_map_mem(dev, sc)) {
2427		device_printf(dev, "could not map memory\n");
2428		return(ENXIO);
2429	}
2430	/* Enable if not formerly enabled */
2431	pci_enable_busmaster(dev);
2432
2433	sc->ha_pciBusNum = pci_get_bus(dev);
2434	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2435
2436	if ((error = asr_alloc_dma(sc)) != 0)
2437		return (error);
2438
2439	/* Check if the device is there? */
2440	if (ASR_resetIOP(sc) == 0) {
2441		device_printf(dev, "Cannot reset adapter\n");
2442		asr_release_dma(sc);
2443		return (EIO);
2444	}
2445	status = &sc->ha_statusmem->status;
2446	if (ASR_getStatus(sc) == NULL) {
2447		device_printf(dev, "could not initialize hardware\n");
2448		asr_release_dma(sc);
2449		return(ENODEV);
2450	}
2451	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2452	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2453	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2454	sc->ha_SystemTable.IopState = status->IopState;
2455	sc->ha_SystemTable.MessengerType = status->MessengerType;
2456	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2457	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2458	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2459
2460	if (!asr_pci_map_int(dev, (void *)sc)) {
2461		device_printf(dev, "could not map interrupt\n");
2462		asr_release_dma(sc);
2463		return(ENXIO);
2464	}
2465
2466	/* Adjust the maximim inbound count */
2467	if (((sc->ha_QueueSize =
2468	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2469	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2470		sc->ha_QueueSize = MAX_INBOUND;
2471	}
2472
2473	/* Adjust the maximum outbound count */
2474	if (((sc->ha_Msgs_Count =
2475	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2476	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2477		sc->ha_Msgs_Count = MAX_OUTBOUND;
2478	}
2479	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2480		sc->ha_Msgs_Count = sc->ha_QueueSize;
2481	}
2482
2483	/* Adjust the maximum SG size to adapter */
2484	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2485	    2)) > MAX_INBOUND_SIZE) {
2486		size = MAX_INBOUND_SIZE;
2487	}
2488	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2489	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2490
2491	/*
2492	 *	Only do a bus/HBA reset on the first time through. On this
2493	 * first time through, we do not send a flush to the devices.
2494	 */
2495	if (ASR_init(sc) == 0) {
2496		struct BufferInfo {
2497			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2498			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2499			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2500		} Buffer;
2501		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2502#define FW_DEBUG_BLED_OFFSET 8
2503
2504		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2505		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2506		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2507			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2508			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2509		}
2510		if (ASR_acquireLct(sc) == 0) {
2511			(void)ASR_acquireHrt(sc);
2512		}
2513	} else {
2514		device_printf(dev, "failed to initialize\n");
2515		asr_release_dma(sc);
2516		return(ENXIO);
2517	}
2518	/*
2519	 *	Add in additional probe responses for more channels. We
2520	 * are reusing the variable `target' for a channel loop counter.
2521	 * Done here because of we need both the acquireLct and
2522	 * acquireHrt data.
2523	 */
2524	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2525	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2526		if (Device->le_type == I2O_UNKNOWN) {
2527			continue;
2528		}
2529		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2530			if (Device->le_target > sc->ha_MaxId) {
2531				sc->ha_MaxId = Device->le_target;
2532			}
2533			if (Device->le_lun > sc->ha_MaxLun) {
2534				sc->ha_MaxLun = Device->le_lun;
2535			}
2536		}
2537		if (((Device->le_type & I2O_PORT) != 0)
2538		 && (Device->le_bus <= MAX_CHANNEL)) {
2539			/* Do not increase MaxId for efficiency */
2540			sc->ha_adapter_target[Device->le_bus] =
2541			    Device->le_target;
2542		}
2543	}
2544
2545	/*
2546	 *	Print the HBA model number as inquired from the card.
2547	 */
2548
2549	device_printf(dev, " ");
2550
2551	if ((iq = (struct scsi_inquiry_data *)malloc(
2552	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2553	    NULL) {
2554		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2555		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2556		int					posted = 0;
2557
2558		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2559		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2560		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2561
2562		I2O_MESSAGE_FRAME_setVersionOffset(
2563		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2564		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2565		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2566		I2O_MESSAGE_FRAME_setMessageSize(
2567		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2568		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2569		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2570		    sizeof(U32));
2571		I2O_MESSAGE_FRAME_setInitiatorAddress(
2572		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2573		I2O_MESSAGE_FRAME_setFunction(
2574		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2575		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2576		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2577		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2578		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2579		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2580		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2581		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2582		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2583		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2584		    DPT_ORGANIZATION_ID);
2585		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2586		Message_Ptr->CDB[0] = INQUIRY;
2587		Message_Ptr->CDB[4] =
2588		    (unsigned char)sizeof(struct scsi_inquiry_data);
2589		if (Message_Ptr->CDB[4] == 0) {
2590			Message_Ptr->CDB[4] = 255;
2591		}
2592
2593		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2594		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2595		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2596		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2597		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2598
2599		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2600		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2601		  sizeof(struct scsi_inquiry_data));
2602		SG(&(Message_Ptr->SGL), 0,
2603		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2604		  iq, sizeof(struct scsi_inquiry_data));
2605		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2606
2607		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2608			printf (" ");
2609			ASR_prstring (iq->vendor, 8);
2610			++posted;
2611		}
2612		if (iq->product[0] && (iq->product[0] != ' ')) {
2613			printf (" ");
2614			ASR_prstring (iq->product, 16);
2615			++posted;
2616		}
2617		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2618			printf (" FW Rev. ");
2619			ASR_prstring (iq->revision, 4);
2620			++posted;
2621		}
2622		free(iq, M_TEMP);
2623		if (posted) {
2624			printf (",");
2625		}
2626	}
2627	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2628	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2629
2630	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2631		struct cam_devq	  * devq;
2632		int		    QueueSize = sc->ha_QueueSize;
2633
2634		if (QueueSize > MAX_INBOUND) {
2635			QueueSize = MAX_INBOUND;
2636		}
2637
2638		/*
2639		 *	Create the device queue for our SIM(s).
2640		 */
2641		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2642			continue;
2643		}
2644
2645		/*
2646		 *	Construct our first channel SIM entry
2647		 */
2648		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2649						unit, &Giant,
2650						1, QueueSize, devq);
2651		if (sc->ha_sim[bus] == NULL) {
2652			continue;
2653		}
2654
2655		if (xpt_bus_register(sc->ha_sim[bus], dev, bus) != CAM_SUCCESS){
2656			cam_sim_free(sc->ha_sim[bus],
2657			  /*free_devq*/TRUE);
2658			sc->ha_sim[bus] = NULL;
2659			continue;
2660		}
2661
2662		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2663		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2664		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2665			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2666			cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE);
2667			sc->ha_sim[bus] = NULL;
2668			continue;
2669		}
2670	}
2671
2672	/*
2673	 *	Generate the device node information
2674	 */
2675	sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
2676			       "asr%d", unit);
2677	if (sc->ha_devt != NULL)
2678		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2679	sc->ha_devt->si_drv1 = sc;
2680	return(0);
2681} /* asr_attach */
2682
/*
 *	CAM poll entry point: service pending adapter completions by
 * calling the interrupt handler directly.
 */
static void
asr_poll(struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */
2688
/*
 *	CAM action entry point: dispatch on the CCB function code.  Every
 * path completes the CCB with xpt_done() after setting its status.
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
			 ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash our softc in the CCB for the completion side. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch ((int)ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
				     cam_sim_bus(sim), ccb->ccb_h.target_id,
				     ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		/* Translate the CCB into an I2O message frame and queue it. */
		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* No inbound frame available; retry later. */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

#if (defined(REPORT_LUNS))
	/*
	 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code, so this case label in a func_code switch looks suspicious
	 * — verify the intent.
	 */
	case REPORT_LUNS:
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts = &(ccb->cts);
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			spi->sync_period = 6; /* 40MHz */
			spi->sync_offset = 15;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET
				   | CTS_SPI_VALID_BUS_WIDTH
				   | CTS_SPI_VALID_DISC;
			scsi->valid = CTS_SCSI_VALID_TQ;

			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Pick a CHS geometry based on the volume size in MB. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
2899
2900/*
2901 * Handle processing of current CCB as pointed to by the Status.
2902 */
2903static int
2904asr_intr(Asr_softc_t *sc)
2905{
2906	int processed;
2907
2908	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2909	    processed = 1) {
2910		union asr_ccb			   *ccb;
2911		u_int				    dsc;
2912		U32				    ReplyOffset;
2913		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2914
2915		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2916		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2917			break;
2918		}
2919		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2920		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2921		/*
2922		 * We do not need any (optional byteswapping) method access to
2923		 * the Initiator context field.
2924		 */
2925		ccb = (union asr_ccb *)(long)
2926		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2927		    &(Reply->StdReplyFrame.StdMessageFrame));
2928		if (I2O_MESSAGE_FRAME_getMsgFlags(
2929		  &(Reply->StdReplyFrame.StdMessageFrame))
2930		  & I2O_MESSAGE_FLAGS_FAIL) {
2931			I2O_UTIL_NOP_MESSAGE	Message;
2932			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2933			U32			MessageOffset;
2934
2935			MessageOffset = (u_long)
2936			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2937			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2938			/*
2939			 *  Get the Original Message Frame's address, and get
2940			 * it's Transaction Context into our space. (Currently
2941			 * unused at original authorship, but better to be
2942			 * safe than sorry). Straight copy means that we
2943			 * need not concern ourselves with the (optional
2944			 * byteswapping) method access.
2945			 */
2946			Reply->StdReplyFrame.TransactionContext =
2947			    bus_space_read_4(sc->ha_frame_btag,
2948			    sc->ha_frame_bhandle, MessageOffset +
2949			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2950			    TransactionContext));
2951			/*
2952			 *	For 64 bit machines, we need to reconstruct the
2953			 * 64 bit context.
2954			 */
2955			ccb = (union asr_ccb *)(long)
2956			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2957			    &(Reply->StdReplyFrame.StdMessageFrame));
2958			/*
2959			 * Unique error code for command failure.
2960			 */
2961			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2962			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2963			/*
2964			 *  Modify the message frame to contain a NOP and
2965			 * re-issue it to the controller.
2966			 */
2967			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2968			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2969#if (I2O_UTIL_NOP != 0)
2970				I2O_MESSAGE_FRAME_setFunction (
2971				  &(Message_Ptr->StdMessageFrame),
2972				  I2O_UTIL_NOP);
2973#endif
2974			/*
2975			 *  Copy the packet out to the Original Message
2976			 */
2977			asr_set_frame(sc, Message_Ptr, MessageOffset,
2978				      sizeof(I2O_UTIL_NOP_MESSAGE));
2979			/*
2980			 *  Issue the NOP
2981			 */
2982			asr_set_ToFIFO(sc, MessageOffset);
2983		}
2984
2985		/*
2986		 *	Asynchronous command with no return requirements,
2987		 * and a generic handler for immunity against odd error
2988		 * returns from the adapter.
2989		 */
2990		if (ccb == NULL) {
2991			/*
2992			 * Return Reply so that it can be used for the
2993			 * next command
2994			 */
2995			asr_set_FromFIFO(sc, ReplyOffset);
2996			continue;
2997		}
2998
2999		/* Welease Wadjah! (and stop timeouts) */
3000		ASR_ccbRemove (sc, ccb);
3001
3002		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3003		    &(Reply->StdReplyFrame));
3004		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
3005		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3006		switch (dsc) {
3007
3008		case I2O_SCSI_DSC_SUCCESS:
3009			ccb->ccb_h.status |= CAM_REQ_CMP;
3010			break;
3011
3012		case I2O_SCSI_DSC_CHECK_CONDITION:
3013			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
3014			    CAM_AUTOSNS_VALID;
3015			break;
3016
3017		case I2O_SCSI_DSC_BUSY:
3018			/* FALLTHRU */
3019		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3020			/* FALLTHRU */
3021		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3022			/* FALLTHRU */
3023		case I2O_SCSI_HBA_DSC_BUS_BUSY:
3024			ccb->ccb_h.status |= CAM_SCSI_BUSY;
3025			break;
3026
3027		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3028			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3029			break;
3030
3031		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3032			/* FALLTHRU */
3033		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3034			/* FALLTHRU */
3035		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3036			/* FALLTHRU */
3037		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3038			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3039			break;
3040
3041		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3042			/* FALLTHRU */
3043		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3044			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3045			break;
3046
3047		default:
3048			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3049			break;
3050		}
3051		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3052			ccb->csio.resid -=
3053			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3054			    Reply);
3055		}
3056
3057		/* Sense data in reply packet */
3058		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3059			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3060
3061			if (size) {
3062				if (size > sizeof(ccb->csio.sense_data)) {
3063					size = sizeof(ccb->csio.sense_data);
3064				}
3065				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3066					size = I2O_SCSI_SENSE_DATA_SZ;
3067				}
3068				if ((ccb->csio.sense_len)
3069				 && (size > ccb->csio.sense_len)) {
3070					size = ccb->csio.sense_len;
3071				}
3072				if (size < ccb->csio.sense_len) {
3073					ccb->csio.sense_resid =
3074					    ccb->csio.sense_len - size;
3075				} else {
3076					ccb->csio.sense_resid = 0;
3077				}
3078				bzero(&(ccb->csio.sense_data),
3079				    sizeof(ccb->csio.sense_data));
3080				bcopy(Reply->SenseData,
3081				      &(ccb->csio.sense_data), size);
3082			}
3083		}
3084
3085		/*
3086		 * Return Reply so that it can be used for the next command
3087		 * since we have no more need for it now
3088		 */
3089		asr_set_FromFIFO(sc, ReplyOffset);
3090
3091		if (ccb->ccb_h.path) {
3092			xpt_done ((union ccb *)ccb);
3093		} else {
3094			wakeup (ccb);
3095		}
3096	}
3097	return (processed);
3098} /* asr_intr */
3099
3100#undef QueueSize	/* Grrrr */
3101#undef SG_Size		/* Grrrr */
3102
3103/*
3104 *	Meant to be included at the bottom of asr.c !!!
3105 */
3106
3107/*
3108 *	Included here as hard coded. Done because other necessary include
3109 *	files utilize C++ comment structures which make them a nuisance to
3110 *	included here just to pick up these three typedefs.
3111 */
3112typedef U32   DPT_TAG_T;
3113typedef U32   DPT_MSG_T;
3114typedef U32   DPT_RTN_T;
3115
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3117#include	"dev/asr/osd_unix.h"
3118
3119#define	asr_unit(dev)	  dev2unit(dev)
3120
3121static u_int8_t ASR_ctlr_held;
3122
3123static int
3124asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
3125{
3126	int		 s;
3127	int		 error;
3128
3129	if (dev->si_drv1 == NULL) {
3130		return (ENODEV);
3131	}
3132	s = splcam ();
3133	if (ASR_ctlr_held) {
3134		error = EBUSY;
3135	} else if ((error = priv_check(td, PRIV_DRIVER)) == 0) {
3136		++ASR_ctlr_held;
3137	}
3138	splx(s);
3139	return (error);
3140} /* asr_open */
3141
3142static int
3143asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
3144{
3145
3146	ASR_ctlr_held = 0;
3147	return (0);
3148} /* asr_close */
3149
3150
3151/*-------------------------------------------------------------------------*/
3152/*		      Function ASR_queue_i				   */
3153/*-------------------------------------------------------------------------*/
3154/* The Parameters Passed To This Function Are :				   */
3155/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3156/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3157/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3158/*									   */
3159/* This Function Will Take The User Request Packet And Convert It To An	   */
3160/* I2O MSG And Send It Off To The Adapter.				   */
3161/*									   */
3162/* Return : 0 For OK, Error Code Otherwise				   */
3163/*-------------------------------------------------------------------------*/
3164static int
3165ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
3166{
3167	union asr_ccb				   * ccb;
3168	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
3169	PI2O_MESSAGE_FRAME			     Message_Ptr;
3170	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
3171	int					     MessageSizeInBytes;
3172	int					     ReplySizeInBytes;
3173	int					     error;
3174	int					     s;
3175	/* Scatter Gather buffer list */
3176	struct ioctlSgList_S {
3177		SLIST_ENTRY(ioctlSgList_S) link;
3178		caddr_t			   UserSpace;
3179		I2O_FLAGS_COUNT		   FlagsCount;
3180		char			   KernelSpace[sizeof(long)];
3181	}					   * elm;
3182	/* Generates a `first' entry */
3183	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3184
3185	if (ASR_getBlinkLedCode(sc)) {
3186		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3187		  ASR_getBlinkLedCode(sc));
3188		return (EIO);
3189	}
3190	/* Copy in the message into a local allocation */
3191	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3192	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3193		debug_usr_cmd_printf (
3194		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3195		return (ENOMEM);
3196	}
3197	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3198	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3199		free(Message_Ptr, M_TEMP);
3200		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3201		return (error);
3202	}
3203	/* Acquire information to determine type of packet */
3204	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3205	/* The offset of the reply information within the user packet */
3206	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3207	  + MessageSizeInBytes);
3208
3209	/* Check if the message is a synchronous initialization command */
3210	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3211	free(Message_Ptr, M_TEMP);
3212	switch (s) {
3213
3214	case I2O_EXEC_IOP_RESET:
3215	{	U32 status;
3216
3217		status = ASR_resetIOP(sc);
3218		ReplySizeInBytes = sizeof(status);
3219		debug_usr_cmd_printf ("resetIOP done\n");
3220		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3221		  ReplySizeInBytes));
3222	}
3223
3224	case I2O_EXEC_STATUS_GET:
3225	{	PI2O_EXEC_STATUS_GET_REPLY status;
3226
3227		status = &sc->ha_statusmem->status;
3228		if (ASR_getStatus(sc) == NULL) {
3229			debug_usr_cmd_printf ("getStatus failed\n");
3230			return (ENXIO);
3231		}
3232		ReplySizeInBytes = sizeof(status);
3233		debug_usr_cmd_printf ("getStatus done\n");
3234		return (copyout ((caddr_t)status, (caddr_t)Reply,
3235		  ReplySizeInBytes));
3236	}
3237
3238	case I2O_EXEC_OUTBOUND_INIT:
3239	{	U32 status;
3240
3241		status = ASR_initOutBound(sc);
3242		ReplySizeInBytes = sizeof(status);
3243		debug_usr_cmd_printf ("intOutBound done\n");
3244		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3245		  ReplySizeInBytes));
3246	}
3247	}
3248
3249	/* Determine if the message size is valid */
3250	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3251	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3252		debug_usr_cmd_printf ("Packet size %d incorrect\n",
3253		  MessageSizeInBytes);
3254		return (EINVAL);
3255	}
3256
3257	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3258	  M_TEMP, M_WAITOK)) == NULL) {
3259		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3260		  MessageSizeInBytes);
3261		return (ENOMEM);
3262	}
3263	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3264	  MessageSizeInBytes)) != 0) {
3265		free(Message_Ptr, M_TEMP);
3266		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3267		  MessageSizeInBytes, error);
3268		return (error);
3269	}
3270
3271	/* Check the size of the reply frame, and start constructing */
3272
3273	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3274	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3275		free(Message_Ptr, M_TEMP);
3276		debug_usr_cmd_printf (
3277		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3278		return (ENOMEM);
3279	}
3280	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3281	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3282		free(Reply_Ptr, M_TEMP);
3283		free(Message_Ptr, M_TEMP);
3284		debug_usr_cmd_printf (
3285		  "Failed to copy in reply frame, errno=%d\n",
3286		  error);
3287		return (error);
3288	}
3289	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3290	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3291	free(Reply_Ptr, M_TEMP);
3292	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3293		free(Message_Ptr, M_TEMP);
3294		debug_usr_cmd_printf (
3295		  "Failed to copy in reply frame[%d], errno=%d\n",
3296		  ReplySizeInBytes, error);
3297		return (EINVAL);
3298	}
3299
3300	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3301	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3302	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3303	  M_TEMP, M_WAITOK)) == NULL) {
3304		free(Message_Ptr, M_TEMP);
3305		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3306		  ReplySizeInBytes);
3307		return (ENOMEM);
3308	}
3309	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3310	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3311	  = Message_Ptr->InitiatorContext;
3312	Reply_Ptr->StdReplyFrame.TransactionContext
3313	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3314	I2O_MESSAGE_FRAME_setMsgFlags(
3315	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3316	  I2O_MESSAGE_FRAME_getMsgFlags(
3317	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3318	      | I2O_MESSAGE_FLAGS_REPLY);
3319
3320	/* Check if the message is a special case command */
3321	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3322	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3323		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3324		  Message_Ptr) & 0xF0) >> 2)) {
3325			free(Message_Ptr, M_TEMP);
3326			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3327			  &(Reply_Ptr->StdReplyFrame),
3328			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
3329			I2O_MESSAGE_FRAME_setMessageSize(
3330			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3331			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3332			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3333			  ReplySizeInBytes);
3334			free(Reply_Ptr, M_TEMP);
3335			return (error);
3336		}
3337	}
3338
3339	/* Deal in the general case */
3340	/* First allocate and optionally copy in each scatter gather element */
3341	SLIST_INIT(&sgList);
3342	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3343		PI2O_SGE_SIMPLE_ELEMENT sg;
3344
3345		/*
3346		 *	since this code is reused in several systems, code
3347		 * efficiency is greater by using a shift operation rather
3348		 * than a divide by sizeof(u_int32_t).
3349		 */
3350		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3351		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3352		    >> 2));
3353		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3354		  + MessageSizeInBytes)) {
3355			caddr_t v;
3356			int	len;
3357
3358			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3359			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3360				error = EINVAL;
3361				break;
3362			}
3363			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3364			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3365			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3366			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3367				Message_Ptr) & 0xF0) >> 2)),
3368			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3369
3370			if ((elm = (struct ioctlSgList_S *)malloc (
3371			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3372			  M_TEMP, M_WAITOK)) == NULL) {
3373				debug_usr_cmd_printf (
3374				  "Failed to allocate SG[%d]\n", len);
3375				error = ENOMEM;
3376				break;
3377			}
3378			SLIST_INSERT_HEAD(&sgList, elm, link);
3379			elm->FlagsCount = sg->FlagsCount;
3380			elm->UserSpace = (caddr_t)
3381			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3382			v = elm->KernelSpace;
3383			/* Copy in outgoing data (DIR bit could be invalid) */
3384			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3385			  != 0) {
3386				break;
3387			}
3388			/*
3389			 *	If the buffer is not contiguous, lets
3390			 * break up the scatter/gather entries.
3391			 */
3392			while ((len > 0)
3393			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3394			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3395				int next, base, span;
3396
3397				span = 0;
3398				next = base = KVTOPHYS(v);
3399				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3400				  base);
3401
3402				/* How far can we go physically contiguously */
3403				while ((len > 0) && (base == next)) {
3404					int size;
3405
3406					next = trunc_page(base) + PAGE_SIZE;
3407					size = next - base;
3408					if (size > len) {
3409						size = len;
3410					}
3411					span += size;
3412					v += size;
3413					len -= size;
3414					base = KVTOPHYS(v);
3415				}
3416
3417				/* Construct the Flags */
3418				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3419				  span);
3420				{
3421					int flags = I2O_FLAGS_COUNT_getFlags(
3422					  &(elm->FlagsCount));
3423					/* Any remaining length? */
3424					if (len > 0) {
3425					    flags &=
3426						~(I2O_SGL_FLAGS_END_OF_BUFFER
3427						 | I2O_SGL_FLAGS_LAST_ELEMENT);
3428					}
3429					I2O_FLAGS_COUNT_setFlags(
3430					  &(sg->FlagsCount), flags);
3431				}
3432
3433				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3434				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
3435				    ((char *)Message_Ptr
3436				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3437					Message_Ptr) & 0xF0) >> 2)),
3438				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3439				  span);
3440				if (len <= 0) {
3441					break;
3442				}
3443
3444				/*
3445				 * Incrementing requires resizing of the
3446				 * packet, and moving up the existing SG
3447				 * elements.
3448				 */
3449				++sg;
3450				MessageSizeInBytes += sizeof(*sg);
3451				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3452				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3453				  + (sizeof(*sg) / sizeof(U32)));
3454				{
3455					PI2O_MESSAGE_FRAME NewMessage_Ptr;
3456
3457					if ((NewMessage_Ptr
3458					  = (PI2O_MESSAGE_FRAME)
3459					    malloc (MessageSizeInBytes,
3460					     M_TEMP, M_WAITOK)) == NULL) {
3461						debug_usr_cmd_printf (
3462						  "Failed to acquire frame[%d] memory\n",
3463						  MessageSizeInBytes);
3464						error = ENOMEM;
3465						break;
3466					}
3467					span = ((caddr_t)sg)
3468					     - (caddr_t)Message_Ptr;
3469					bcopy(Message_Ptr,NewMessage_Ptr, span);
3470					bcopy((caddr_t)(sg-1),
3471					  ((caddr_t)NewMessage_Ptr) + span,
3472					  MessageSizeInBytes - span);
3473					free(Message_Ptr, M_TEMP);
3474					sg = (PI2O_SGE_SIMPLE_ELEMENT)
3475					  (((caddr_t)NewMessage_Ptr) + span);
3476					Message_Ptr = NewMessage_Ptr;
3477				}
3478			}
3479			if ((error)
3480			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3481			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3482				break;
3483			}
3484			++sg;
3485		}
3486		if (error) {
3487			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3488				SLIST_REMOVE_HEAD(&sgList, link);
3489				free(elm, M_TEMP);
3490			}
3491			free(Reply_Ptr, M_TEMP);
3492			free(Message_Ptr, M_TEMP);
3493			return (error);
3494		}
3495	}
3496
3497	debug_usr_cmd_printf ("Inbound: ");
3498	debug_usr_cmd_dump_message(Message_Ptr);
3499
3500	/* Send the command */
3501	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3502		/* Free up in-kernel buffers */
3503		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3504			SLIST_REMOVE_HEAD(&sgList, link);
3505			free(elm, M_TEMP);
3506		}
3507		free(Reply_Ptr, M_TEMP);
3508		free(Message_Ptr, M_TEMP);
3509		return (ENOMEM);
3510	}
3511
3512	/*
3513	 * We do not need any (optional byteswapping) method access to
3514	 * the Initiator context field.
3515	 */
3516	I2O_MESSAGE_FRAME_setInitiatorContext64(
3517	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3518
3519	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3520
3521	free(Message_Ptr, M_TEMP);
3522
3523	/*
3524	 * Wait for the board to report a finished instruction.
3525	 */
3526	s = splcam();
3527	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3528		if (ASR_getBlinkLedCode(sc)) {
3529			/* Reset Adapter */
3530			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3531			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3532			  ASR_getBlinkLedCode(sc));
3533			if (ASR_reset (sc) == ENXIO) {
3534				/* Command Cleanup */
3535				ASR_ccbRemove(sc, ccb);
3536			}
3537			splx(s);
3538			/* Free up in-kernel buffers */
3539			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3540				SLIST_REMOVE_HEAD(&sgList, link);
3541				free(elm, M_TEMP);
3542			}
3543			free(Reply_Ptr, M_TEMP);
3544			asr_free_ccb(ccb);
3545			return (EIO);
3546		}
3547		/* Check every second for BlinkLed */
3548		/* There is no PRICAM, but outwardly PRIBIO is functional */
3549		tsleep(ccb, PRIBIO, "asr", hz);
3550	}
3551	splx(s);
3552
3553	debug_usr_cmd_printf ("Outbound: ");
3554	debug_usr_cmd_dump_message(Reply_Ptr);
3555
3556	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3557	  &(Reply_Ptr->StdReplyFrame),
3558	  (ccb->ccb_h.status != CAM_REQ_CMP));
3559
3560	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3561	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3562		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3563		  ccb->csio.dxfer_len - ccb->csio.resid);
3564	}
3565	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3566	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3567	 - I2O_SCSI_SENSE_DATA_SZ))) {
3568		int size = ReplySizeInBytes
3569		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3570		  - I2O_SCSI_SENSE_DATA_SZ;
3571
3572		if (size > sizeof(ccb->csio.sense_data)) {
3573			size = sizeof(ccb->csio.sense_data);
3574		}
3575		if (size < ccb->csio.sense_len) {
3576			ccb->csio.sense_resid = ccb->csio.sense_len - size;
3577		} else {
3578			ccb->csio.sense_resid = 0;
3579		}
3580		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3581		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3582		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3583		    Reply_Ptr, size);
3584	}
3585
3586	/* Free up in-kernel buffers */
3587	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3588		/* Copy out as necessary */
3589		if ((error == 0)
3590		/* DIR bit considered `valid', error due to ignorance works */
3591		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3592		  & I2O_SGL_FLAGS_DIR) == 0)) {
3593			error = copyout((caddr_t)(elm->KernelSpace),
3594			  elm->UserSpace,
3595			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3596		}
3597		SLIST_REMOVE_HEAD(&sgList, link);
3598		free(elm, M_TEMP);
3599	}
3600	if (error == 0) {
3601	/* Copy reply frame to user space */
3602		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3603				ReplySizeInBytes);
3604	}
3605	free(Reply_Ptr, M_TEMP);
3606	asr_free_ccb(ccb);
3607
3608	return (error);
3609} /* ASR_queue_i */
3610
3611/*----------------------------------------------------------------------*/
3612/*			    Function asr_ioctl			       */
3613/*----------------------------------------------------------------------*/
3614/* The parameters passed to this function are :				*/
3615/*     dev  : Device number.						*/
3616/*     cmd  : Ioctl Command						*/
3617/*     data : User Argument Passed In.					*/
3618/*     flag : Mode Parameter						*/
3619/*     proc : Process Parameter						*/
3620/*									*/
3621/* This function is the user interface into this adapter driver		*/
3622/*									*/
3623/* Return : zero if OK, error code if not				*/
3624/*----------------------------------------------------------------------*/
3625
3626static int
3627asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
3628{
3629	Asr_softc_t	*sc = dev->si_drv1;
3630	int		i, error = 0;
3631#ifdef ASR_IOCTL_COMPAT
3632	int		j;
3633#endif /* ASR_IOCTL_COMPAT */
3634
3635	if (sc != NULL)
3636	switch(cmd) {
3637
3638	case DPT_SIGNATURE:
3639#ifdef ASR_IOCTL_COMPAT
3640#if (dsDescription_size != 50)
3641	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3642#endif
3643		if (cmd & 0xFFFF0000) {
3644			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3645			return (0);
3646		}
3647	/* Traditional version of the ioctl interface */
3648	case DPT_SIGNATURE & 0x0000FFFF:
3649#endif
3650		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3651				sizeof(dpt_sig_S)));
3652
3653	/* Traditional version of the ioctl interface */
3654	case DPT_CTRLINFO & 0x0000FFFF:
3655	case DPT_CTRLINFO: {
3656		struct {
3657			u_int16_t length;
3658			u_int16_t drvrHBAnum;
3659			u_int32_t baseAddr;
3660			u_int16_t blinkState;
3661			u_int8_t  pciBusNum;
3662			u_int8_t  pciDeviceNum;
3663			u_int16_t hbaFlags;
3664			u_int16_t Interrupt;
3665			u_int32_t reserved1;
3666			u_int32_t reserved2;
3667			u_int32_t reserved3;
3668		} CtlrInfo;
3669
3670		bzero(&CtlrInfo, sizeof(CtlrInfo));
3671		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3672		CtlrInfo.drvrHBAnum = asr_unit(dev);
3673		CtlrInfo.baseAddr = sc->ha_Base;
3674		i = ASR_getBlinkLedCode (sc);
3675		if (i == -1)
3676			i = 0;
3677
3678		CtlrInfo.blinkState = i;
3679		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3680		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3681#define	FLG_OSD_PCI_VALID 0x0001
3682#define	FLG_OSD_DMA	  0x0002
3683#define	FLG_OSD_I2O	  0x0004
3684		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3685		CtlrInfo.Interrupt = sc->ha_irq;
3686#ifdef ASR_IOCTL_COMPAT
3687		if (cmd & 0xffff0000)
3688			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3689		else
3690#endif /* ASR_IOCTL_COMPAT */
3691		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3692	}	return (error);
3693
3694	/* Traditional version of the ioctl interface */
3695	case DPT_SYSINFO & 0x0000FFFF:
3696	case DPT_SYSINFO: {
3697		sysInfo_S	Info;
3698#ifdef ASR_IOCTL_COMPAT
3699		char	      * cp;
3700		/* Kernel Specific ptok `hack' */
3701#define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3702
3703		bzero(&Info, sizeof(Info));
3704
3705		/* Appears I am the only person in the Kernel doing this */
3706		outb (0x70, 0x12);
3707		i = inb(0x71);
3708		j = i >> 4;
3709		if (i == 0x0f) {
3710			outb (0x70, 0x19);
3711			j = inb (0x71);
3712		}
3713		Info.drive0CMOS = j;
3714
3715		j = i & 0x0f;
3716		if (i == 0x0f) {
3717			outb (0x70, 0x1a);
3718			j = inb (0x71);
3719		}
3720		Info.drive1CMOS = j;
3721
3722		Info.numDrives = *((char *)ptok(0x475));
3723#else /* ASR_IOCTL_COMPAT */
3724		bzero(&Info, sizeof(Info));
3725#endif /* ASR_IOCTL_COMPAT */
3726
3727		Info.processorFamily = ASR_sig.dsProcessorFamily;
3728#if defined(__i386__)
3729		switch (cpu) {
3730		case CPU_386SX: case CPU_386:
3731			Info.processorType = PROC_386; break;
3732		case CPU_486SX: case CPU_486:
3733			Info.processorType = PROC_486; break;
3734		case CPU_586:
3735			Info.processorType = PROC_PENTIUM; break;
3736		case CPU_686:
3737			Info.processorType = PROC_SEXIUM; break;
3738		}
3739#endif
3740
3741		Info.osType = OS_BSDI_UNIX;
3742		Info.osMajorVersion = osrelease[0] - '0';
3743		Info.osMinorVersion = osrelease[2] - '0';
3744		/* Info.osRevision = 0; */
3745		/* Info.osSubRevision = 0; */
3746		Info.busType = SI_PCI_BUS;
3747		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3748
3749#ifdef ASR_IOCTL_COMPAT
3750		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3751		/* Go Out And Look For I2O SmartROM */
3752		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3753			int k;
3754
3755			cp = ptok(j);
3756			if (*((unsigned short *)cp) != 0xAA55) {
3757				continue;
3758			}
3759			j += (cp[2] * 512) - 2048;
3760			if ((*((u_long *)(cp + 6))
3761			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3762			 || (*((u_long *)(cp + 10))
3763			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3764				continue;
3765			}
3766			cp += 0x24;
3767			for (k = 0; k < 64; ++k) {
3768				if (*((unsigned short *)cp)
3769				 == (' ' + ('v' * 256))) {
3770					break;
3771				}
3772			}
3773			if (k < 64) {
3774				Info.smartROMMajorVersion
3775				    = *((unsigned char *)(cp += 4)) - '0';
3776				Info.smartROMMinorVersion
3777				    = *((unsigned char *)(cp += 2));
3778				Info.smartROMRevision
3779				    = *((unsigned char *)(++cp));
3780				Info.flags |= SI_SmartROMverValid;
3781				Info.flags &= ~SI_NO_SmartROM;
3782				break;
3783			}
3784		}
3785		/* Get The Conventional Memory Size From CMOS */
3786		outb (0x70, 0x16);
3787		j = inb (0x71);
3788		j <<= 8;
3789		outb (0x70, 0x15);
3790		j |= inb(0x71);
3791		Info.conventionalMemSize = j;
3792
3793		/* Get The Extended Memory Found At Power On From CMOS */
3794		outb (0x70, 0x31);
3795		j = inb (0x71);
3796		j <<= 8;
3797		outb (0x70, 0x30);
3798		j |= inb(0x71);
3799		Info.extendedMemSize = j;
3800		Info.flags |= SI_MemorySizeValid;
3801
3802		/* Copy Out The Info Structure To The User */
3803		if (cmd & 0xFFFF0000)
3804			bcopy(&Info, data, sizeof(Info));
3805		else
3806#endif /* ASR_IOCTL_COMPAT */
3807		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3808		return (error); }
3809
3810		/* Get The BlinkLED State */
3811	case DPT_BLINKLED:
3812		i = ASR_getBlinkLedCode (sc);
3813		if (i == -1)
3814			i = 0;
3815#ifdef ASR_IOCTL_COMPAT
3816		if (cmd & 0xffff0000)
3817			bcopy(&i, data, sizeof(i));
3818		else
3819#endif /* ASR_IOCTL_COMPAT */
3820		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3821		break;
3822
3823		/* Send an I2O command */
3824	case I2OUSRCMD:
3825		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3826
3827		/* Reset and re-initialize the adapter */
3828	case I2ORESETCMD:
3829		return (ASR_reset(sc));
3830
3831		/* Rescan the LCT table and resynchronize the information */
3832	case I2ORESCANCMD:
3833		return (ASR_rescan(sc));
3834	}
3835	return (EINVAL);
3836} /* asr_ioctl */
3837