1/*-
2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
4 * All rights reserved.
5 *
6 * TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
11 *
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
22 *
23 * SCSI I2O host adapter driver
24 *
25 *	V1.10 2004/05/05 scottl@freebsd.org
26 *		- Massive cleanup of the driver to remove dead code and
27 *		  non-conformant style.
28 *		- Removed most i386-specific code to make it more portable.
29 *		- Converted to the bus_space API.
30 *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 *		- The 2000S and 2005S do not initialize on some machines,
32 *		  increased timeout to 255ms from 50ms for the StatusGet
33 *		  command.
34 *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 *		- I knew this one was too good to be true. The error return
36 *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 *		  to the bit masked status.
38 *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 *		- The 2005S that was supported is affectionately called the
40 *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41 *		  16MB low-cost configuration, Firmware was forced to go
42 *		  to a Split BAR Firmware. This requires a separate IOP and
43 *		  Messaging base address.
44 *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 *		- Handle support for 2005S Zero Channel RAID solution.
46 *		- System locked up if the Adapter locked up. Do not try
47 *		  to send other commands if the resetIOP command fails. The
48 *		  fail outstanding command discovery loop was flawed as the
49 *		  removal of the command from the list prevented discovering
50 *		  all the commands.
51 *		- Comment changes to clarify driver.
52 *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 *		- We do not use the AC_FOUND_DEV event because of I2O.
54 *		  Removed asr_async.
55 *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 *		  mode as this is confused with competitor adapters in run
59 *		  mode.
60 *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 *		  to prevent operating system panic.
62 *		- moved default major number to 154 from 97.
63 *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65 *		  series that is visible, it's more of an internal code name.
66 *		  remove any visible references within reason for now.
67 *		- bus_ptr->LUN was not correctly zeroed when initially
68 *		  allocated causing a possible panic of the operating system
69 *		  during boot.
70 *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 *		- Code always fails for ASR_getTid affecting performance.
72 *		- initiated a set of changes that resulted from a formal
73 *		  code inspection by Mark_Salyzyn@adaptec.com,
74 *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 *		  Their findings were focussed on the LCT & TID handler, and
77 *		  all resulting changes were to improve code readability,
78 *		  consistency or have a positive effect on performance.
79 *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 *		- Passthrough returned an incorrect error.
81 *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82 *		  on command completion.
83 *		- generate control device nodes using make_dev and delete_dev.
84 *		- Performance affected by TID caching reallocing.
85 *		- Made suggested changes by Justin_Gibbs@adaptec.com
86 *			- use splcam instead of splbio.
87 *			- use cam_imask instead of bio_imask.
88 *			- use u_int8_t instead of u_char.
89 *			- use u_int16_t instead of u_short.
90 *			- use u_int32_t instead of u_long where appropriate.
91 *			- use 64 bit context handler instead of 32 bit.
92 *			- create_ccb should only allocate the worst case
93 *			  requirements for the driver since CAM may evolve
94 *			  making union ccb much larger than needed here.
95 *			  renamed create_ccb to asr_alloc_ccb.
96 *			- go nutz justifying all debug prints as macros
97 *			  defined at the top and remove unsightly ifdefs.
98 *			- INLINE STATIC viewed as confusing. Historically
99 *			  utilized to affect code performance and debug
100 *			  issues in OS, Compiler or OEM specific situations.
101 *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103 *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104 *			changed variable name xs to ccb
105 *			changed struct scsi_link to struct cam_path
106 *			changed struct scsibus_data to struct cam_sim
107 *			stopped using fordriver for holding on to the TID
108 *			use proprietary packet creation instead of scsi_inquire
109 *			CAM layer sends synchronize commands.
110 */
111
112#include <sys/cdefs.h>
113#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
114#include <sys/kernel.h>
115#include <sys/module.h>
116#include <sys/systm.h>
117#include <sys/malloc.h>
118#include <sys/conf.h>
119#include <sys/ioccom.h>
120#include <sys/proc.h>
121#include <sys/bus.h>
122#include <machine/resource.h>
123#include <machine/bus.h>
124#include <sys/rman.h>
125#include <sys/stat.h>
126#include <sys/bus_dma.h>
127
128#include <cam/cam.h>
129#include <cam/cam_ccb.h>
130#include <cam/cam_sim.h>
131#include <cam/cam_xpt_sim.h>
132#include <cam/cam_xpt_periph.h>
133
134#include <cam/scsi/scsi_all.h>
135#include <cam/scsi/scsi_message.h>
136
137#include <vm/vm.h>
138#include <vm/pmap.h>
139
140#if defined(__i386__)
141#include "opt_asr.h"
142#include <i386/include/cputypes.h>
143
144#ifndef BURN_BRIDGES
145#if defined(ASR_COMPAT)
146#define ASR_IOCTL_COMPAT
147#endif /* ASR_COMPAT */
148#endif /* !BURN_BRIDGES */
149
150#elif defined(__alpha__)
151#include <alpha/include/pmap.h>
152#endif
153#include <machine/vmparam.h>
154
155#include <dev/pci/pcivar.h>
156#include <dev/pci/pcireg.h>
157
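/*
 * Portability shims used by the DPT/I2O headers below: network-order byte
 * swapping and kernel-virtual to physical address translation.
 */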
158#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
159#define	KVTOPHYS(x) vtophys(x)
160#include	<dev/asr/dptalign.h>
161#include	<dev/asr/i2oexec.h>
162#include	<dev/asr/i2obscsi.h>
163#include	<dev/asr/i2odpt.h>
164#include	<dev/asr/i2oadptr.h>
165
166#include	<dev/asr/sys_info.h>
167
168__FBSDID("$FreeBSD: head/sys/dev/asr/asr.c 155286 2006-02-04 08:45:19Z scottl $");
169
170#define	ASR_VERSION	1
171#define	ASR_REVISION	'1'
172#define	ASR_SUBREVISION '0'
173#define	ASR_MONTH	5
174#define	ASR_DAY		5
175#define	ASR_YEAR	(2004 - 1980)
176
177/*
178 *	Debug macros to reduce the unsightly ifdefs
179 */
180#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
181static __inline void
182debug_asr_message(PI2O_MESSAGE_FRAME message)
183{
184	u_int32_t * pointer = (u_int32_t *)message;
185	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
186	u_int32_t   counter = 0;
187
188	while (length--) {
189		printf("%08lx%c", (u_long)*(pointer++),
190		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
191	}
192}
193#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
194
195#ifdef DEBUG_ASR
196  /* Breaks on non-STDC compilers :-( */
197#define debug_asr_printf(fmt,args...)	printf(fmt, ##args)
198#define debug_asr_dump_message(message)	debug_asr_message(message)
199#define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
200#else /* DEBUG_ASR */
201#define debug_asr_printf(fmt,args...)
202#define debug_asr_dump_message(message)
203#define debug_asr_print_path(ccb)
204#endif /* DEBUG_ASR */
205
206/*
207 *	If DEBUG_ASR_CMD is defined:
208 *		0 - display incoming SCSI commands.
209 *		1 - also print a quick character before queueing.
210 *		2 - also dump outgoing message frames.
211 */
212#if (defined(DEBUG_ASR_CMD))
213#define debug_asr_cmd_printf(fmt,args...)     printf(fmt,##args)
214static __inline void
215debug_asr_dump_ccb(union ccb *ccb)
216{
217	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
218	int		len = ccb->csio.cdb_len;
219
220	while (len) {
221		debug_asr_cmd_printf (" %02x", *(cp++));
222		--len;
223	}
224}
225#if (DEBUG_ASR_CMD > 0)
226#define debug_asr_cmd1_printf		       debug_asr_cmd_printf
227#else
228#define debug_asr_cmd1_printf(fmt,args...)
229#endif
230#if (DEBUG_ASR_CMD > 1)
231#define debug_asr_cmd2_printf			debug_asr_cmd_printf
232#define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
233#else
234#define debug_asr_cmd2_printf(fmt,args...)
235#define debug_asr_cmd2_dump_message(message)
236#endif
237#else /* DEBUG_ASR_CMD */
238#define debug_asr_cmd_printf(fmt,args...)
239#define debug_asr_dump_ccb(ccb)
240#define debug_asr_cmd1_printf(fmt,args...)
241#define debug_asr_cmd2_printf(fmt,args...)
242#define debug_asr_cmd2_dump_message(message)
243#endif /* DEBUG_ASR_CMD */
244
245#if (defined(DEBUG_ASR_USR_CMD))
246#define debug_usr_cmd_printf(fmt,args...)   printf(fmt,##args)
247#define debug_usr_cmd_dump_message(message) debug_usr_message(message)
248#else /* DEBUG_ASR_USR_CMD */
249#define debug_usr_cmd_printf(fmt,args...)
250#define debug_usr_cmd_dump_message(message)
251#endif /* DEBUG_ASR_USR_CMD */
252
253#ifdef ASR_IOCTL_COMPAT
254#define	dsDescription_size 46	/* Snug as a bug in a rug */
255#endif /* ASR_IOCTL_COMPAT */
256
257#include "dev/asr/dptsig.h"
258
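/*
 * Driver signature block (dpt_sig_S), intended for the DPT management
 * ioctl interface; asr_attach() patches the OS version portion of the
 * description string below at run time.
 */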
259static dpt_sig_S ASR_sig = {
260	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
261	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
262	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
263	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
264	ASR_MONTH, ASR_DAY, ASR_YEAR,
265/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
266	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
267	/*		 ^^^^^ asr_attach alters these to match OS */
268};
269
270/* Configuration Definitions */
271
272#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
273#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
274#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
275#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
276#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
277#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
278#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
279#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
280				/* Also serves as the minimum map for	 */
281				/* the 2005S zero channel RAID product	 */
282
283/* I2O register set */
284#define	I2O_REG_STATUS		0x30
285#define	I2O_REG_MASK		0x34
286#define	I2O_REG_TOFIFO		0x40
287#define	I2O_REG_FROMFIFO	0x44
288
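/*
 * Setting this bit in I2O_REG_MASK holds off interrupt delivery while the
 * driver polls the adapter (see ASR_initiateCp() and ASR_queue_s()).
 */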
289#define	Mask_InterruptsDisabled	0x08
290
291/*
292 * A MIX of performance and space considerations for TID lookups
293 */
294typedef u_int16_t tid_t;
295
296typedef struct {
297	u_int32_t size;		/* up to MAX_LUN    */
298	tid_t	  TID[1];
299} lun2tid_t;
300
301typedef struct {
302	u_int32_t   size;	/* up to MAX_TARGET */
303	lun2tid_t * LUN[1];
304} target2lun_t;
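/*
 * ha_targets[bus] points at a target2lun_t whose LUN[] slots in turn point
 * at lun2tid_t arrays caching the I2O TID for each (bus, target, lun).
 * Both levels are grown on demand by ASR_getTidAddress().
 */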
305
306/*
307 *	To ensure that we only allocate and use the worst case ccb here, let's
308 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
309 *	ccb type, ensure that you add the additional structures into our local
310 *	ccb union. To ensure strict type checking, we will utilize the local
311 *	ccb definition wherever possible.
312 */
313union asr_ccb {
314	struct ccb_hdr	    ccb_h;  /* For convenience */
315	struct ccb_scsiio   csio;
316	struct ccb_setasync csa;
317};
318
319/**************************************************************************
320** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321**  Is Configured Into The System.  The Structure Supplies Configuration **
322**  Information, Status Info, Queue Info And An Active CCB List Pointer. **
323***************************************************************************/
324
325typedef struct Asr_softc {
326	device_t		ha_dev;
327	u_int16_t		ha_irq;
328	u_long			ha_Base;       /* base port for each board */
329	bus_size_t		ha_blinkLED;
330	bus_space_handle_t	ha_i2o_bhandle;
331	bus_space_tag_t		ha_i2o_btag;
332	bus_space_handle_t	ha_frame_bhandle;
333	bus_space_tag_t		ha_frame_btag;
334	I2O_IOP_ENTRY		ha_SystemTable;
335	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */
336
337	bus_dma_tag_t		ha_parent_dmat;
338	bus_dma_tag_t		ha_status_dmat;
339	bus_dmamap_t		ha_status_dmamap;
340	u_int32_t	      * ha_status;
341	u_int32_t		ha_status_phys;
342	struct cam_path	      * ha_path[MAX_CHANNEL+1];
343	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
344	struct resource	      * ha_mem_res;
345	struct resource	      * ha_mes_res;
346	struct resource	      * ha_irq_res;
347	void		      * ha_intr;
348	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
349#define le_type	  IdentityTag[0]
350#define I2O_BSA	    0x20
351#define I2O_FCA	    0x40
352#define I2O_SCSI    0x00
353#define I2O_PORT    0x80
354#define I2O_UNKNOWN 0x7F
355#define le_bus	  IdentityTag[1]
356#define le_target IdentityTag[2]
357#define le_lun	  IdentityTag[3]
358	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
359	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
360	u_long			ha_Msgs_Phys;
361
362	u_int8_t		ha_in_reset;
363#define HA_OPERATIONAL	    0
364#define HA_IN_RESET	    1
365#define HA_OFF_LINE	    2
366#define HA_OFF_LINE_RECOVERY 3
367	/* Configuration information */
368	/* The target id maximums we take */
369	u_int8_t		ha_MaxBus;     /* Maximum bus */
370	u_int8_t		ha_MaxId;      /* Maximum target ID */
371	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
372	u_int8_t		ha_SgSize;     /* Max SG elements */
373	u_int8_t		ha_pciBusNum;
374	u_int8_t		ha_pciDeviceNum;
375	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
376	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
377	u_int16_t		ha_Msgs_Count;
378
379	/* Links into other parents and HBAs */
380	struct Asr_softc      * ha_next;       /* HBA list */
381	struct cdev *ha_devt;
382} Asr_softc_t;
383
384static Asr_softc_t * Asr_softc;
385
386/*
387 *	Prototypes of the routines we have in this object.
388 */
389
390/* I2O HDM interface */
391static int	asr_probe(device_t dev);
392static int	asr_attach(device_t dev);
393
394static int	asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
395			  struct thread *td);
396static int	asr_open(struct cdev *dev, int32_t flags, int32_t ifmt,
397			 struct thread *td);
398static int	asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td);
399static int	asr_intr(Asr_softc_t *sc);
400static void	asr_timeout(void *arg);
401static int	ASR_init(Asr_softc_t *sc);
402static int	ASR_acquireLct(Asr_softc_t *sc);
403static int	ASR_acquireHrt(Asr_softc_t *sc);
404static void	asr_action(struct cam_sim *sim, union ccb *ccb);
405static void	asr_poll(struct cam_sim *sim);
406static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
407
408/*
409 *	Here is the auto-probe structure used to nest our tests appropriately
410 *	during the startup phase of the operating system.
411 */
412static device_method_t asr_methods[] = {
413	DEVMETHOD(device_probe,	 asr_probe),
414	DEVMETHOD(device_attach, asr_attach),
415	{ 0, 0 }
416};
417
418static driver_t asr_driver = {
419	"asr",
420	asr_methods,
421	sizeof(Asr_softc_t)
422};
423
424static devclass_t asr_devclass;
425DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
426
427/*
428 * devsw for asr hba driver
429 *
430 * Only ioctl is used; the CAM peripheral drivers provide all other access.
431 */
432static struct cdevsw asr_cdevsw = {
433	.d_version =	D_VERSION,
434	.d_flags =	D_NEEDGIANT,
435	.d_open =	asr_open,
436	.d_close =	asr_close,
437	.d_ioctl =	asr_ioctl,
438	.d_name =	"asr",
439};
440
441/* I2O support routines */
442
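/*
 * Thin bus_space wrappers for the adapter's status, interrupt-mask and
 * message FIFO registers, plus a helper that copies a message frame into
 * adapter memory at a given offset.
 */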
443static __inline u_int32_t
444asr_get_FromFIFO(Asr_softc_t *sc)
445{
446	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
447				 I2O_REG_FROMFIFO));
448}
449
450static __inline u_int32_t
451asr_get_ToFIFO(Asr_softc_t *sc)
452{
453	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
454				 I2O_REG_TOFIFO));
455}
456
457static __inline u_int32_t
458asr_get_intr(Asr_softc_t *sc)
459{
460	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
461				 I2O_REG_MASK));
462}
463
464static __inline u_int32_t
465asr_get_status(Asr_softc_t *sc)
466{
467	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
468				 I2O_REG_STATUS));
469}
470
471static __inline void
472asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
473{
474	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
475			  val);
476}
477
478static __inline void
479asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
480{
481	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
482			  val);
483}
484
485static __inline void
486asr_set_intr(Asr_softc_t *sc, u_int32_t val)
487{
488	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
489			  val);
490}
491
492static __inline void
493asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
494{
495	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
496				 offset, (u_int32_t *)frame, len);
497}
498
499/*
500 *	Fill a message frame with defaults: zero it, then set version, size
 *	and initiator address.
501 */
502static PI2O_MESSAGE_FRAME
503ASR_fillMessage(void *Message, u_int16_t size)
504{
505	PI2O_MESSAGE_FRAME Message_Ptr;
506
507	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
508	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
509	bzero(Message_Ptr, size);
510	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
511	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
512	  (size + sizeof(U32) - 1) >> 2);
513	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
514	return (Message_Ptr);
515} /* ASR_fillMessage */
516
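/* Value read back from the message FIFOs when no frame is available. */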
517#define	EMPTY_QUEUE (0xffffffff)
518
519static __inline U32
520ASR_getMessage(Asr_softc_t *sc)
521{
522	U32	MessageOffset;
523
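	/*
	 * Grab a free inbound message frame offset; if the FIFO reports
	 * empty, retry the read once before giving up.
	 */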
524	MessageOffset = asr_get_ToFIFO(sc);
525	if (MessageOffset == EMPTY_QUEUE)
526		MessageOffset = asr_get_ToFIFO(sc);
527
528	return (MessageOffset);
529} /* ASR_getMessage */
530
531/* Issue a polled command */
532static U32
533ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
534{
535	U32	Mask = -1L;
536	U32	MessageOffset;
537	u_int	Delay = 1500;
538
539	/*
540	 * ASR_initiateCp is only used for synchronous commands and is made
541	 * more resilient to adapter delays since commands like resetIOP can
542	 * cause the adapter to be deaf for a little while.
543	 */
544	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
545	 && (--Delay != 0)) {
546		DELAY (10000);
547	}
548	if (MessageOffset != EMPTY_QUEUE) {
549		asr_set_frame(sc, Message, MessageOffset,
550			      I2O_MESSAGE_FRAME_getMessageSize(Message));
551		/*
552		 *	Disable the Interrupts
553		 */
554		Mask = asr_get_intr(sc);
555		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
556		asr_set_ToFIFO(sc, MessageOffset);
557	}
558	return (Mask);
559} /* ASR_initiateCp */
560
561/*
562 *	Reset the adapter.
563 */
564static U32
565ASR_resetIOP(Asr_softc_t *sc)
566{
567	struct resetMessage {
568		I2O_EXEC_IOP_RESET_MESSAGE M;
569		U32			   R;
570	} Message;
571	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
572	U32			       * Reply_Ptr;
573	U32				 Old;
574
575	/*
576	 *  Build up our copy of the Message.
577	 */
578	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
579	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
580	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
581	/*
582	 *  Reset the Reply Status
583	 */
584	Reply_Ptr = sc->ha_status;
585	*Reply_Ptr = 0;
586	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
587	    sc->ha_status_phys);
588	/*
589	 *	Send the Message out
590	 */
591	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
592	     0xffffffff) {
593		/*
594		 * Wait for a response (Poll), timeouts are dangerous if
595		 * the card is truly responsive. We assume response in 2s.
596		 */
597		u_int8_t Delay = 200;
598
599		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
600			DELAY (10000);
601		}
602		/*
603		 *	Re-enable the interrupts.
604		 */
605		asr_set_intr(sc, Old);
606		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
607		return(*Reply_Ptr);
608	}
609	KASSERT(Old != -1L, ("Old == -1"));
610	return (0);
611} /* ASR_resetIOP */
612
613/*
614 *	Get the current state of the adapter
615 */
616static PI2O_EXEC_STATUS_GET_REPLY
617ASR_getStatus(Asr_softc_t *sc, PI2O_EXEC_STATUS_GET_REPLY buffer)
618{
619	I2O_EXEC_STATUS_GET_MESSAGE	Message;
620	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
621	U32				Old;
622
623	/*
624	 *  Build up our copy of the Message.
625	 */
626	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
627	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
628	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
629	    I2O_EXEC_STATUS_GET);
630	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
631	    KVTOPHYS((void *)buffer));
632	/* This one is a Byte Count */
633	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
634	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
635	/*
636	 *  Reset the Reply Status
637	 */
638	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
639	/*
640	 *	Send the Message out
641	 */
642	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
643	    0xffffffff) {
644		/*
645		 *	Wait for a response (Poll), timeouts are dangerous if
646		 * the card is truly responsive. We assume response in 255ms.
647		 */
648		u_int8_t Delay = 255;
649
650		while (*((volatile U8 *)&(buffer->SyncByte)) == 0) {
651			if (--Delay == 0) {
652				buffer = NULL;
653				break;
654			}
655			DELAY (1000);
656		}
657		/*
658		 *	Re-enable the interrupts.
659		 */
660		asr_set_intr(sc, Old);
661		return (buffer);
662	}
663	return (NULL);
664} /* ASR_getStatus */
665
666/*
667 *	Check if the device is a SCSI I2O HBA, and add it to the list.
668 */
669
670/*
671 * Probe for the ASR controller.  If we find it, we will use it.
672 */
674static int
675asr_probe(device_t dev)
676{
677	u_int32_t id;
678
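	/*
	 * 0x1044 is the DPT/Adaptec PCI vendor ID; devices 0xA501 and 0xA511
	 * are the I2O RAID controllers this driver claims.
	 */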
679	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
680	if ((id == 0xA5011044) || (id == 0xA5111044)) {
681		device_set_desc(dev, "Adaptec Caching SCSI RAID");
682		return (BUS_PROBE_DEFAULT);
683	}
684	return (ENXIO);
685} /* asr_probe */
686
687static __inline union asr_ccb *
688asr_alloc_ccb(Asr_softc_t *sc)
689{
690	union asr_ccb *new_ccb;
691
692	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
693	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
694		new_ccb->ccb_h.pinfo.priority = 1;
695		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
696		new_ccb->ccb_h.spriv_ptr0 = sc;
697	}
698	return (new_ccb);
699} /* asr_alloc_ccb */
700
701static __inline void
702asr_free_ccb(union asr_ccb *free_ccb)
703{
704	free(free_ccb, M_DEVBUF);
705} /* asr_free_ccb */
706
707/*
708 *	Print inquiry data `carefully'
709 */
710static void
711ASR_prstring(u_int8_t *s, int len)
712{
713	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
714		printf ("%c", *(s++));
715	}
716} /* ASR_prstring */
717
718/*
719 *	Send a message synchronously (polled, with interrupts masked) for a ccb.
720 */
721static int
722ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
723{
724	int		s;
725	U32		Mask;
726	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
727
728	/*
729	 * We do not need any (optional byteswapping) method access to
730	 * the Initiator context field.
731	 */
732	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
733
734	/* Prevent interrupt service */
735	s = splcam ();
736	Mask = asr_get_intr(sc);
737	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
738
739	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
740		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
741		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
742	}
743
744	/*
745	 * Wait for this board to report a finished instruction.
746	 */
747	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
748		(void)asr_intr (sc);
749	}
750
751	/* Re-enable Interrupts */
752	asr_set_intr(sc, Mask);
753	splx(s);
754
755	return (ccb->ccb_h.status);
756} /* ASR_queue_s */
757
758/*
759 *	Send a message synchronously to an Asr_softc_t.
760 */
761static int
762ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
763{
764	union asr_ccb	*ccb;
765	int		status;
766
767	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
768		return (CAM_REQUEUE_REQ);
769	}
770
771	status = ASR_queue_s (ccb, Message);
772
773	asr_free_ccb(ccb);
774
775	return (status);
776} /* ASR_queue_c */
777
778/*
779 *	Add the specified ccb to the active queue
780 */
781static __inline void
782ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
783{
784	int s;
785
786	s = splcam();
787	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
788	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
789		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
790			/*
791			 * RAID systems can take considerable time to
792			 * complete some commands given the large cache
793			 * flushes when switching from write back to write thru.
794			 */
795			ccb->ccb_h.timeout = 6 * 60 * 1000;
796		}
797		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
798		  (ccb->ccb_h.timeout * hz) / 1000);
799	}
800	splx(s);
801} /* ASR_ccbAdd */
802
803/*
804 *	Remove the specified ccb from the active queue.
805 */
806static __inline void
807ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
808{
809	int s;
810
811	s = splcam();
812	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
813	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
814	splx(s);
815} /* ASR_ccbRemove */
816
817/*
818 *	Fail all the active commands, so they get re-issued by the operating
819 *	system.
820 */
821static void
822ASR_failActiveCommands(Asr_softc_t *sc)
823{
824	struct ccb_hdr	*ccb;
825	int		s;
826
827	s = splcam();
828	/*
829	 *	We do not need to inform the CAM layer that we had a bus
830	 * reset since we manage it on our own, this also prevents the
831	 * SCSI_DELAY settling that would be required on other systems.
832	 * The `SCSI_DELAY' has already been handled by the card via the
833	 * acquisition of the LCT table while we are at CAM priority level.
834	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
835	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
836	 *  }
837	 */
838	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
839		ASR_ccbRemove (sc, (union asr_ccb *)ccb);
840
841		ccb->status &= ~CAM_STATUS_MASK;
842		ccb->status |= CAM_REQUEUE_REQ;
843		/* Nothing transferred */
844		((struct ccb_scsiio *)ccb)->resid
845		  = ((struct ccb_scsiio *)ccb)->dxfer_len;
846
847		if (ccb->path) {
848			xpt_done ((union ccb *)ccb);
849		} else {
850			wakeup (ccb);
851		}
852	}
853	splx(s);
854} /* ASR_failActiveCommands */
855
856/*
857 *	The following command causes the HBA to reset the specified bus.
858 */
859static void
860ASR_resetBus(Asr_softc_t *sc, int bus)
861{
862	I2O_HBA_BUS_RESET_MESSAGE	Message;
863	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
864	PI2O_LCT_ENTRY			Device;
865
866	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
867	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
868	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
869	  I2O_HBA_BUS_RESET);
870	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
871	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
872	  ++Device) {
873		if (((Device->le_type & I2O_PORT) != 0)
874		 && (Device->le_bus == bus)) {
875			I2O_MESSAGE_FRAME_setTargetAddress(
876			  &Message_Ptr->StdMessageFrame,
877			  I2O_LCT_ENTRY_getLocalTID(Device));
878			/* Asynchronous command, with no expectations */
879			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
880			break;
881		}
882	}
883} /* ASR_resetBus */
884
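/*
 *	Read the adapter's blink-LED fault code.  The byte following the
 *	blink-LED location must read 0xBC for a code to be valid; a return
 *	of zero means no fault code is posted.
 */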
885static __inline int
886ASR_getBlinkLedCode(Asr_softc_t *sc)
887{
888	U8	blink;
889
890	if (sc == NULL)
891		return (0);
892
893	blink = bus_space_read_1(sc->ha_frame_btag,
894				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
895	if (blink != 0xBC)
896		return (0);
897
898	blink = bus_space_read_1(sc->ha_frame_btag,
899				 sc->ha_frame_bhandle, sc->ha_blinkLED);
900	return (blink);
901} /* ASR_getBlinkLedCode */
902
903/*
904 *	Determine the address of a TID lookup. Must be done at high priority
905 *	since the address can be changed by other threads of execution.
906 *
907 *	Returns a NULL pointer if not indexable (but will attempt to generate
908 *	an index if the `new_entry' flag is set to TRUE).
909 *
910 *	All addressable entries are guaranteed to be zero if never initialized.
911 */
912static tid_t *
913ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
914{
915	target2lun_t	*bus_ptr;
916	lun2tid_t	*target_ptr;
917	unsigned	new_size;
918
919	/*
920	 *	Validity checking of incoming parameters. More of a bound
921	 * expansion limit than an issue with the code dealing with the
922	 * values.
923	 *
924	 *	sc must be valid before it gets here, so that check could be
925	 * dropped if speed were a critical issue.
926	 */
927	if ((sc == NULL)
928	 || (bus > MAX_CHANNEL)
929	 || (target > sc->ha_MaxId)
930	 || (lun > sc->ha_MaxLun)) {
931		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
932		  (u_long)sc, bus, target, lun);
933		return (NULL);
934	}
935	/*
936	 *	See if there is an associated bus list.
937	 *
938	 *	for performance, allocate in size of BUS_CHUNK chunks.
939	 *	BUS_CHUNK must be a power of two. This is to reduce
940	 *	fragmentation effects on the allocations.
941	 */
942#define BUS_CHUNK 8
943	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
944	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
945		/*
946		 *	Allocate a new structure?
947		 *		Since one element in structure, the +1
948		 *		needed for size has been abstracted.
949		 */
950		if ((new_entry == FALSE)
951		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
952		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
953		    M_TEMP, M_WAITOK | M_ZERO))
954		   == NULL)) {
955			debug_asr_printf("failed to allocate bus list\n");
956			return (NULL);
957		}
958		bus_ptr->size = new_size + 1;
959	} else if (bus_ptr->size <= new_size) {
960		target2lun_t * new_bus_ptr;
961
962		/*
963		 *	Reallocate a new structure?
964		 *		Since one element in structure, the +1
965		 *		needed for size has been abstracted.
966		 */
967		if ((new_entry == FALSE)
968		 || ((new_bus_ptr = (target2lun_t *)malloc (
969		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
970		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
971			debug_asr_printf("failed to reallocate bus list\n");
972			return (NULL);
973		}
974		/*
975		 *	Copy the whole thing, safer, simpler coding
976		 * and not really performance critical at this point.
977		 */
978		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
979		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
980		sc->ha_targets[bus] = new_bus_ptr;
981		free(bus_ptr, M_TEMP);
982		bus_ptr = new_bus_ptr;
983		bus_ptr->size = new_size + 1;
984	}
985	/*
986	 *	We now have the bus list, let's get to the target list.
987	 *	Since most systems have only *one* LUN, we do not allocate
988	 *	in chunks as above; here we allow one, then grow in chunk sizes.
989	 *	TARGET_CHUNK must be a power of two. This is to reduce
990	 *	fragmentation effects on the allocations.
991	 */
992#define TARGET_CHUNK 8
993	if ((new_size = lun) != 0) {
994		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
995	}
996	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
997		/*
998		 *	Allocate a new structure?
999		 *		Since one element in structure, the +1
1000		 *		needed for size has been abstracted.
1001		 */
1002		if ((new_entry == FALSE)
1003		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1004		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1005		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1006			debug_asr_printf("failed to allocate target list\n");
1007			return (NULL);
1008		}
1009		target_ptr->size = new_size + 1;
1010	} else if (target_ptr->size <= new_size) {
1011		lun2tid_t * new_target_ptr;
1012
1013		/*
1014		 *	Reallocate a new structure?
1015		 *		Since one element in structure, the +1
1016		 *		needed for size has been abstracted.
1017		 */
1018		if ((new_entry == FALSE)
1019		 || ((new_target_ptr = (lun2tid_t *)malloc (
1020		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1021		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1022			debug_asr_printf("failed to reallocate target list\n");
1023			return (NULL);
1024		}
1025		/*
1026		 *	Copy the whole thing, safer, simpler coding
1027		 * and not really performance critical at this point.
1028		 */
1029		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1030		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1031		bus_ptr->LUN[target] = new_target_ptr;
1032		free(target_ptr, M_TEMP);
1033		target_ptr = new_target_ptr;
1034		target_ptr->size = new_size + 1;
1035	}
1036	/*
1037	 *	Now, acquire the TID address from the LUN indexed list.
1038	 */
1039	return (&(target_ptr->TID[lun]));
1040} /* ASR_getTidAddress */
1041
1042/*
1043 *	Get a pre-existing TID relationship.
1044 *
1045 *	If the TID was never set, return (tid_t)-1.
1046 *
1047 *	should use mutex rather than spl.
1048 */
1049static __inline tid_t
1050ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1051{
1052	tid_t	*tid_ptr;
1053	int	s;
1054	tid_t	retval;
1055
1056	s = splcam();
1057	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1058	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1059	 || (*tid_ptr == (tid_t)0)) {
1060		splx(s);
1061		return ((tid_t)-1);
1062	}
1063	retval = *tid_ptr;
1064	splx(s);
1065	return (retval);
1066} /* ASR_getTid */
1067
1068/*
1069 *	Set a TID relationship.
1070 *
1071 *	If the TID was not set, return (tid_t)-1.
1072 *
1073 *	should use mutex rather than spl.
1074 */
1075static __inline tid_t
1076ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1077{
1078	tid_t	*tid_ptr;
1079	int	s;
1080
1081	if (TID != (tid_t)-1) {
1082		if (TID == 0) {
1083			return ((tid_t)-1);
1084		}
1085		s = splcam();
1086		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1087		 == NULL) {
1088			splx(s);
1089			return ((tid_t)-1);
1090		}
1091		*tid_ptr = TID;
1092		splx(s);
1093	}
1094	return (TID);
1095} /* ASR_setTid */
1096
1097/*-------------------------------------------------------------------------*/
1098/*		      Function ASR_rescan				   */
1099/*-------------------------------------------------------------------------*/
1100/* The Parameters Passed To This Function Are :				   */
1101/*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1102/*									   */
1103/* This Function Will rescan the adapter and resynchronize any data	   */
1104/*									   */
1105/* Return : 0 For OK, Error Code Otherwise				   */
1106/*-------------------------------------------------------------------------*/
1107
1108static int
1109ASR_rescan(Asr_softc_t *sc)
1110{
1111	int bus;
1112	int error;
1113
1114	/*
1115	 * Re-acquire the LCT table and synchronize us to the adapter.
1116	 */
1117	if ((error = ASR_acquireLct(sc)) == 0) {
1118		error = ASR_acquireHrt(sc);
1119	}
1120
1121	if (error != 0) {
1122		return error;
1123	}
1124
1125	bus = sc->ha_MaxBus;
1126	/* Reset all existing cached TID lookups */
1127	do {
1128		int target, event = 0;
1129
1130		/*
1131		 *	Scan for all targets on this bus to see if they
1132		 * got affected by the rescan.
1133		 */
1134		for (target = 0; target <= sc->ha_MaxId; ++target) {
1135			int lun;
1136
1137			/* Stay away from the controller ID */
1138			if (target == sc->ha_adapter_target[bus]) {
1139				continue;
1140			}
1141			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1142				PI2O_LCT_ENTRY Device;
1143				tid_t	       TID = (tid_t)-1;
1144				tid_t	       LastTID;
1145
1146				/*
1147				 * See if the cached TID changed. Search for
1148				 * the device in our new LCT.
1149				 */
1150				for (Device = sc->ha_LCT->LCTEntry;
1151				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1152				   + I2O_LCT_getTableSize(sc->ha_LCT));
1153				  ++Device) {
1154					if ((Device->le_type != I2O_UNKNOWN)
1155					 && (Device->le_bus == bus)
1156					 && (Device->le_target == target)
1157					 && (Device->le_lun == lun)
1158					 && (I2O_LCT_ENTRY_getUserTID(Device)
1159					  == 0xFFF)) {
1160						TID = I2O_LCT_ENTRY_getLocalTID(
1161						  Device);
1162						break;
1163					}
1164				}
1165				/*
1166				 * Indicate to the OS that the label needs
1167				 * to be recalculated, or that the specific
1168				 * open device is no longer valid (Merde)
1169				 * because the cached TID changed.
1170				 */
1171				LastTID = ASR_getTid (sc, bus, target, lun);
1172				if (LastTID != TID) {
1173					struct cam_path * path;
1174
1175					if (xpt_create_path(&path,
1176					  /*periph*/NULL,
1177					  cam_sim_path(sc->ha_sim[bus]),
1178					  target, lun) != CAM_REQ_CMP) {
1179						if (TID == (tid_t)-1) {
1180							event |= AC_LOST_DEVICE;
1181						} else {
1182							event |= AC_INQ_CHANGED
1183							       | AC_GETDEV_CHANGED;
1184						}
1185					} else {
1186						if (TID == (tid_t)-1) {
1187							xpt_async(
1188							  AC_LOST_DEVICE,
1189							  path, NULL);
1190						} else if (LastTID == (tid_t)-1) {
1191							struct ccb_getdev ccb;
1192
1193							xpt_setup_ccb(
1194							  &(ccb.ccb_h),
1195							  path, /*priority*/5);
1196							xpt_async(
1197							  AC_FOUND_DEVICE,
1198							  path,
1199							  &ccb);
1200						} else {
1201							xpt_async(
1202							  AC_INQ_CHANGED,
1203							  path, NULL);
1204							xpt_async(
1205							  AC_GETDEV_CHANGED,
1206							  path, NULL);
1207						}
1208					}
1209				}
1210				/*
1211				 *	We have the option of clearing the
1212				 * cached TID for it to be rescanned, or to
1213				 * set it now even if the device never got
1214				 * accessed. We chose the latter since we
1215				 * currently do not use the condition that
1216				 * the TID ever got cached.
1217				 */
1218				ASR_setTid (sc, bus, target, lun, TID);
1219			}
1220		}
1221		/*
1222		 *	The xpt layer cannot handle multiple events in the
1223		 * same call.
1224		 */
1225		if (event & AC_LOST_DEVICE) {
1226			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1227		}
1228		if (event & AC_INQ_CHANGED) {
1229			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1230		}
1231		if (event & AC_GETDEV_CHANGED) {
1232			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1233		}
1234	} while (--bus >= 0);
1235	return (error);
1236} /* ASR_rescan */
1237
1238/*-------------------------------------------------------------------------*/
1239/*		      Function ASR_reset				   */
1240/*-------------------------------------------------------------------------*/
1241/* The Parameters Passed To This Function Are :				   */
1242/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1243/*									   */
1244/* This Function Will reset the adapter and resynchronize any data	   */
1245/*									   */
1246/* Return : None							   */
1247/*-------------------------------------------------------------------------*/
1248
1249static int
1250ASR_reset(Asr_softc_t *sc)
1251{
1252	int s, retVal;
1253
1254	s = splcam();
1255	if ((sc->ha_in_reset == HA_IN_RESET)
1256	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1257		splx (s);
1258		return (EBUSY);
1259	}
1260	/*
1261	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1262	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1263	 */
1264	++(sc->ha_in_reset);
1265	if (ASR_resetIOP(sc) == 0) {
1266		debug_asr_printf ("ASR_resetIOP failed\n");
1267		/*
1268		 *	We really need to take this card off-line; easier said
1269		 * than done. Better to keep retrying for now since, if a
1270		 * UART cable is connected, the blink LEDs show the adapter is
1271		 * now in a hard state requiring action from the monitor commands
1272		 * on the HBA to continue. For debugging, waiting forever is a
1273		 * good thing. In a production system, however, one may wish
1274		 * to instead take the card off-line ...
1275		 */
1276		/* Wait Forever */
1277		while (ASR_resetIOP(sc) == 0);
1278	}
1279	retVal = ASR_init (sc);
1280	splx (s);
1281	if (retVal != 0) {
1282		debug_asr_printf ("ASR_init failed\n");
1283		sc->ha_in_reset = HA_OFF_LINE;
1284		return (ENXIO);
1285	}
1286	if (ASR_rescan (sc) != 0) {
1287		debug_asr_printf ("ASR_rescan failed\n");
1288	}
1289	ASR_failActiveCommands (sc);
1290	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1291		printf ("asr%d: Brining adapter back on-line\n",
1292		  sc->ha_path[0]
1293		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1294		    : 0);
1295	}
1296	sc->ha_in_reset = HA_OPERATIONAL;
1297	return (0);
1298} /* ASR_reset */
1299
1300/*
1301 *	Device timeout handler.
1302 */
1303static void
1304asr_timeout(void *arg)
1305{
1306	union asr_ccb	*ccb = (union asr_ccb *)arg;
1307	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1308	int		s;
1309
1310	debug_asr_print_path(ccb);
1311	debug_asr_printf("timed out");
1312
1313	/*
1314	 *	Check whether the adapter has locked up.
1315	 */
1316	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1317		/* Reset Adapter */
1318		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1319		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1320		if (ASR_reset (sc) == ENXIO) {
1321			/* Try again later */
1322			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1323			  (caddr_t)ccb,
1324			  (ccb->ccb_h.timeout * hz) / 1000);
1325		}
1326		return;
1327	}
1328	/*
1329	 *	Abort does not function on the ASR card!!! Walking away from
1330	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1331	 * our best bet, followed by a complete adapter reset if that fails.
1332	 */
1333	s = splcam();
1334	/* Check if we already timed out once to raise the issue */
1335	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1336		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1337		if (ASR_reset (sc) == ENXIO) {
1338			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1339			  (caddr_t)ccb,
1340			  (ccb->ccb_h.timeout * hz) / 1000);
1341		}
1342		splx(s);
1343		return;
1344	}
1345	debug_asr_printf ("\nresetting bus\n");
1346	/* If the BUS reset does not take, then an adapter reset is next! */
1347	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1348	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1349	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
1350	  (ccb->ccb_h.timeout * hz) / 1000);
1351	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1352	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1353	splx(s);
1354} /* asr_timeout */
1355
1356/*
1357 * Send a message asynchronously.
1358 */
1359static int
1360ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1361{
1362	U32		MessageOffset;
1363	union asr_ccb	*ccb;
1364
1365	debug_asr_printf("Host Command Dump:\n");
1366	debug_asr_dump_message(Message);
1367
1368	ccb = (union asr_ccb *)(long)
1369	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1370
1371	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1372		asr_set_frame(sc, Message, MessageOffset,
1373			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1374		if (ccb) {
1375			ASR_ccbAdd (sc, ccb);
1376		}
1377		/* Post the command */
1378		asr_set_ToFIFO(sc, MessageOffset);
1379	} else {
1380		if (ASR_getBlinkLedCode(sc)) {
1381			/*
1382			 *	Unlikely we can do anything if we can't grab a
1383			 * message frame :-(, but let's give it a try.
1384			 */
1385			(void)ASR_reset(sc);
1386		}
1387	}
1388	return (MessageOffset);
1389} /* ASR_queue */
1390
1391
1392/* Simple Scatter Gather elements */
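/*
 * SG(SGL, Index, Flags, Buffer, Size) fills simple element `Index' of an SG
 * list with the byte count, the supplied flags and the physical address of
 * Buffer (zero when Buffer is NULL).
 */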
1393#define	SG(SGL,Index,Flags,Buffer,Size)				   \
1394	I2O_FLAGS_COUNT_setCount(				   \
1395	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1396	  Size);						   \
1397	I2O_FLAGS_COUNT_setFlags(				   \
1398	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1399	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
1400	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
1401	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
1402	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1403
1404/*
1405 *	Retrieve a Parameter Group from a TID (synchronous UTIL_PARAMS_GET).
1406 */
1407static void *
1408ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1409	      unsigned BufferSize)
1410{
1411	struct paramGetMessage {
1412		I2O_UTIL_PARAMS_GET_MESSAGE M;
1413		char
1414		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1415		struct Operations {
1416			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1417			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1418		}			     O;
1419	}				Message;
1420	struct Operations		*Operations_Ptr;
1421	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
1422	struct ParamBuffer {
1423		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1424		I2O_PARAM_READ_OPERATION_RESULT	    Read;
1425		char				    Info[1];
1426	}				*Buffer_Ptr;
1427
1428	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1429	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1430	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1431	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1432	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1433	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1434	bzero(Operations_Ptr, sizeof(struct Operations));
1435	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1436	  &(Operations_Ptr->Header), 1);
1437	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1438	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1439	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1440	  &(Operations_Ptr->Template[0]), 0xFFFF);
1441	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1442	  &(Operations_Ptr->Template[0]), Group);
1443	Buffer_Ptr = (struct ParamBuffer *)Buffer;
1444	bzero(Buffer_Ptr, BufferSize);
1445
1446	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1447	  I2O_VERSION_11
1448	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1449	    / sizeof(U32)) << 4));
1450	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1451	  TID);
1452	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1453	  I2O_UTIL_PARAMS_GET);
1454	/*
1455	 *  Set up the buffers as scatter gather elements.
1456	 */
1457	SG(&(Message_Ptr->SGL), 0,
1458	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1459	  Operations_Ptr, sizeof(struct Operations));
1460	SG(&(Message_Ptr->SGL), 1,
1461	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1462	  Buffer_Ptr, BufferSize);
1463
1464	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1465	 && (Buffer_Ptr->Header.ResultCount)) {
1466		return ((void *)(Buffer_Ptr->Info));
1467	}
1468	return (NULL);
1469} /* ASR_getParams */
1470
1471/*
1472 *	Acquire the LCT information.
1473 */
1474static int
1475ASR_acquireLct(Asr_softc_t *sc)
1476{
1477	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
1478	PI2O_SGE_SIMPLE_ELEMENT		sg;
1479	int				MessageSizeInBytes;
1480	caddr_t				v;
1481	int				len;
1482	I2O_LCT				Table;
1483	PI2O_LCT_ENTRY			Entry;
1484
1485	/*
1486	 *	sc value assumed valid
1487	 */
1488	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1489	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1490	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
1491	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1492		return (ENOMEM);
1493	}
1494	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1495	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1496	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1497	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1498	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1499	    I2O_EXEC_LCT_NOTIFY);
1500	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1501	    I2O_CLASS_MATCH_ANYCLASS);
1502	/*
1503	 *	Request the LCT table to determine the number of device entries
1504	 * to reserve space for.
1505	 */
1506	SG(&(Message_Ptr->SGL), 0,
1507	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1508	  sizeof(I2O_LCT));
1509	/*
1510	 *	since this code is reused in several systems, code efficiency
1511	 * is greater by using a shift operation rather than a divide by
1512	 * sizeof(u_int32_t).
1513	 */
1514	I2O_LCT_setTableSize(&Table,
1515	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1516	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1517	/*
1518	 *	Determine the size of the LCT table.
1519	 */
1520	if (sc->ha_LCT) {
1521		free(sc->ha_LCT, M_TEMP);
1522	}
1523	/*
1524	 *	malloc only generates contiguous memory when less than a
1525	 * page is expected. We must break the request up into an SG list ...
1526	 */
1527	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1528	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1529	 || (len > (128 * 1024))) {	/* Arbitrary */
1530		free(Message_Ptr, M_TEMP);
1531		return (EINVAL);
1532	}
1533	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
1534		free(Message_Ptr, M_TEMP);
1535		return (ENOMEM);
1536	}
1537	/*
1538	 *	since this code is reused in several systems, code efficiency
1539	 * is greater by using a shift operation rather than a divide by
1540	 * sizeof(u_int32_t).
1541	 */
1542	I2O_LCT_setTableSize(sc->ha_LCT,
1543	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1544	/*
1545	 *	Convert the access to the LCT table into a SG list.
1546	 */
1547	sg = Message_Ptr->SGL.u.Simple;
1548	v = (caddr_t)(sc->ha_LCT);
1549	for (;;) {
1550		int next, base, span;
1551
1552		span = 0;
1553		next = base = KVTOPHYS(v);
1554		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1555
1556		/* How far can we go contiguously */
1557		while ((len > 0) && (base == next)) {
1558			int size;
1559
1560			next = trunc_page(base) + PAGE_SIZE;
1561			size = next - base;
1562			if (size > len) {
1563				size = len;
1564			}
1565			span += size;
1566			v += size;
1567			len -= size;
1568			base = KVTOPHYS(v);
1569		}
1570
1571		/* Construct the Flags */
1572		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1573		{
1574			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1575			if (len <= 0) {
1576				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1577				    | I2O_SGL_FLAGS_LAST_ELEMENT
1578				    | I2O_SGL_FLAGS_END_OF_BUFFER);
1579			}
1580			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1581		}
1582
1583		if (len <= 0) {
1584			break;
1585		}
1586
1587		/*
1588		 * Incrementing requires resizing of the packet.
1589		 */
1590		++sg;
1591		MessageSizeInBytes += sizeof(*sg);
1592		I2O_MESSAGE_FRAME_setMessageSize(
1593		  &(Message_Ptr->StdMessageFrame),
1594		  I2O_MESSAGE_FRAME_getMessageSize(
1595		    &(Message_Ptr->StdMessageFrame))
1596		  + (sizeof(*sg) / sizeof(U32)));
1597		{
1598			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1599
1600			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1601			    malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1602			    == NULL) {
1603				free(sc->ha_LCT, M_TEMP);
1604				sc->ha_LCT = NULL;
1605				free(Message_Ptr, M_TEMP);
1606				return (ENOMEM);
1607			}
1608			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1609			bcopy(Message_Ptr, NewMessage_Ptr, span);
1610			free(Message_Ptr, M_TEMP);
1611			sg = (PI2O_SGE_SIMPLE_ELEMENT)
1612			  (((caddr_t)NewMessage_Ptr) + span);
1613			Message_Ptr = NewMessage_Ptr;
1614		}
1615	}
1616	{	int retval;
1617
1618		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1619		free(Message_Ptr, M_TEMP);
1620		if (retval != CAM_REQ_CMP) {
1621			return (ENODEV);
1622		}
1623	}
1624	/* If the LCT table grew, let's truncate accesses */
1625	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1626		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1627	}
1628	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1629	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1630	  ++Entry) {
1631		Entry->le_type = I2O_UNKNOWN;
1632		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1633
1634		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1635			Entry->le_type = I2O_BSA;
1636			break;
1637
1638		case I2O_CLASS_SCSI_PERIPHERAL:
1639			Entry->le_type = I2O_SCSI;
1640			break;
1641
1642		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1643			Entry->le_type = I2O_FCA;
1644			break;
1645
1646		case I2O_CLASS_BUS_ADAPTER_PORT:
1647			Entry->le_type = I2O_PORT | I2O_SCSI;
1648			/* FALLTHRU */
1649		case I2O_CLASS_FIBRE_CHANNEL_PORT:
1650			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1651			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
1652				Entry->le_type = I2O_PORT | I2O_FCA;
1653			}
1654		{	struct ControllerInfo {
1655				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1656				I2O_PARAM_READ_OPERATION_RESULT	    Read;
1657				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1658			} Buffer;
1659			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1660
1661			Entry->le_bus = 0xff;
1662			Entry->le_target = 0xff;
1663			Entry->le_lun = 0xff;
1664
1665			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1666			  ASR_getParams(sc,
1667			    I2O_LCT_ENTRY_getLocalTID(Entry),
1668			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1669			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1670				continue;
1671			}
1672			Entry->le_target
1673			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1674			    Info);
1675			Entry->le_lun = 0;
1676		}	/* FALLTHRU */
1677		default:
1678			continue;
1679		}
1680		{	struct DeviceInfo {
1681				I2O_PARAM_RESULTS_LIST_HEADER	Header;
1682				I2O_PARAM_READ_OPERATION_RESULT Read;
1683				I2O_DPT_DEVICE_INFO_SCALAR	Info;
1684			} Buffer;
1685			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;
1686
1687			Entry->le_bus = 0xff;
1688			Entry->le_target = 0xff;
1689			Entry->le_lun = 0xff;
1690
1691			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1692			  ASR_getParams(sc,
1693			    I2O_LCT_ENTRY_getLocalTID(Entry),
1694			    I2O_DPT_DEVICE_INFO_GROUP_NO,
1695			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1696				continue;
1697			}
1698			Entry->le_type
1699			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1700			Entry->le_bus
1701			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1702			if ((Entry->le_bus > sc->ha_MaxBus)
1703			 && (Entry->le_bus <= MAX_CHANNEL)) {
1704				sc->ha_MaxBus = Entry->le_bus;
1705			}
1706			Entry->le_target
1707			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1708			Entry->le_lun
1709			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1710		}
1711	}
1712	/*
1713	 *	A zero return value indicates success.
1714	 */
1715	return (0);
1716} /* ASR_acquireLct */
1717
1718/*
1719 * Initialize a message frame.
1720 * We assume that the CDB has already been set up, so all we do here is
1721 * generate the Scatter Gather list.
1722 */
1723static PI2O_MESSAGE_FRAME
1724ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
1725{
1726	PI2O_MESSAGE_FRAME	Message_Ptr;
1727	PI2O_SGE_SIMPLE_ELEMENT sg;
1728	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1729	vm_size_t		size, len;
1730	caddr_t			v;
1731	U32			MessageSize;
1732	int			next, span, base, rw;
1733	int			target = ccb->ccb_h.target_id;
1734	int			lun = ccb->ccb_h.target_lun;
1735	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1736	tid_t			TID;
1737
1738	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1739	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1740	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1741	      sizeof(I2O_SG_ELEMENT)));
1742
1743	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1744		PI2O_LCT_ENTRY Device;
1745
1746		TID = 0;
1747		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1748		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1749		    ++Device) {
1750			if ((Device->le_type != I2O_UNKNOWN)
1751			 && (Device->le_bus == bus)
1752			 && (Device->le_target == target)
1753			 && (Device->le_lun == lun)
1754			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1755				TID = I2O_LCT_ENTRY_getLocalTID(Device);
1756				ASR_setTid(sc, Device->le_bus,
1757					   Device->le_target, Device->le_lun,
1758					   TID);
1759				break;
1760			}
1761		}
1762	}
1763	if (TID == (tid_t)0) {
1764		return (NULL);
1765	}
1766	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1767	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1768	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1769	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1770	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1771		/ sizeof(U32)) << 4));
1772	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1773	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1774	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1775	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1776	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1777	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1778	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1779	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1780	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1781	    I2O_SCB_FLAG_ENABLE_DISCONNECT
1782	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1783	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1784	/*
1785	 * We do not need any (optional byteswapping) method access to
1786	 * the Initiator & Transaction context field.
1787	 */
1788	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1789
1790	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1791	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1792	/*
1793	 * copy the cdb over
1794	 */
1795	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1796	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1797	bcopy(&(ccb->csio.cdb_io),
1798	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1799	    ccb->csio.cdb_len);
1800
1801	/*
1802	 * Given a buffer describing a transfer, set up a scatter/gather map
1803	 * in a ccb to map that SCSI transfer.
1804	 */
1805
1806	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1807
1808	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1809	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1810	  (ccb->csio.dxfer_len)
1811	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1812		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1813		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1814		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1815		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1816		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1817		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1818		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1819	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
1820		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1821		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1822
1823	/*
1824	 * Given a transfer described by a `data', fill in the SG list.
1825	 */
1826	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1827
1828	len = ccb->csio.dxfer_len;
1829	v = ccb->csio.data_ptr;
1830	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1831	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1832	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1833	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
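	/*
	 * Walk the transfer buffer and build simple SG elements. Each
	 * element covers one physically contiguous span; the inner loop
	 * below extends a span a page at a time for as long as KVTOPHYS()
	 * remains contiguous, so a virtually contiguous buffer may still
	 * require several elements.
	 */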
1834	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1835	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1836		span = 0;
1837		next = base = KVTOPHYS(v);
1838		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1839
1840		/* How far can we go contiguously */
1841		while ((len > 0) && (base == next)) {
1842			next = trunc_page(base) + PAGE_SIZE;
1843			size = next - base;
1844			if (size > len) {
1845				size = len;
1846			}
1847			span += size;
1848			v += size;
1849			len -= size;
1850			base = KVTOPHYS(v);
1851		}
1852
1853		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1854		if (len == 0) {
1855			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1856		}
1857		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1858		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1859		++sg;
1860		MessageSize += sizeof(*sg) / sizeof(U32);
1861	}
1862	/* We always do the request sense ... */
1863	if ((span = ccb->csio.sense_len) == 0) {
1864		span = sizeof(ccb->csio.sense_data);
1865	}
1866	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1867	  &(ccb->csio.sense_data), span);
1868	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1869	  MessageSize + (sizeof(*sg) / sizeof(U32)));
1870	return (Message_Ptr);
1871} /* ASR_init_message */
1872
1873/*
1874 *	Initialize the outbound (reply) message FIFO.
1875 */
1876static U32
1877ASR_initOutBound(Asr_softc_t *sc)
1878{
1879	struct initOutBoundMessage {
1880		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1881		U32			       R;
1882	}				Message;
1883	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
1884	U32				*volatile Reply_Ptr;
1885	U32				Old;
1886
1887	/*
1888	 *  Build up our copy of the Message.
1889	 */
1890	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1891	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1892	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1893	  I2O_EXEC_OUTBOUND_INIT);
1894	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1895	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1896	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1897	/*
1898	 *  Reset the Reply Status
1899	 */
1900	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
1901	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1902	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1903	  sizeof(U32));
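	/*
	 * The single U32 described by the SG element above is where the IOP
	 * deposits its OutboundInit status; the polling loop below spins on
	 * it until the value reaches at least REJECTED, i.e. the attempt has
	 * concluded one way or the other.
	 */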
1904	/*
1905	 *	Send the Message out
1906	 */
1907	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1908	    0xffffffff) {
1909		u_long size, addr;
1910
1911		/*
1912		 *	Wait for a response (Poll).
1913		 */
1914		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1915		/*
1916		 *	Re-enable the interrupts.
1917		 */
1918		asr_set_intr(sc, Old);
1919		/*
1920		 *	Populate the outbound table.
1921		 */
1922		if (sc->ha_Msgs == NULL) {
1923
1924			/* Allocate the reply frames */
1925			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1926			  * sc->ha_Msgs_Count;
1927
1928			/*
1929			 *	contigmalloc only works reliably at
1930			 * initialization time.
1931			 */
1932			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1933			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1934			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1935				bzero(sc->ha_Msgs, size);
1936				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1937			}
1938		}
1939
1940		/* Initialize the outbound FIFO */
1941		if (sc->ha_Msgs != NULL)
1942		for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1943		    size; --size) {
1944			asr_set_FromFIFO(sc, addr);
1945			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1946		}
1947		return (*Reply_Ptr);
1948	}
1949	return (0);
1950} /* ASR_initOutBound */
1951
1952/*
1953 *	Set the system table
1954 */
1955static int
1956ASR_setSysTab(Asr_softc_t *sc)
1957{
1958	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1959	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1960	Asr_softc_t		    * ha;
1961	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1962	int			      retVal;
1963
1964	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
1965	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1966		return (ENOMEM);
1967	}
1968	for (ha = Asr_softc; ha; ha = ha->ha_next) {
1969		++SystemTable->NumberEntries;
1970	}
1971	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
1972	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1973	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1974	  M_TEMP, M_WAITOK)) == NULL) {
1975		free(SystemTable, M_TEMP);
1976		return (ENOMEM);
1977	}
1978	(void)ASR_fillMessage((void *)Message_Ptr,
1979	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1980	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1981	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1982	  (I2O_VERSION_11 +
1983	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1984			/ sizeof(U32)) << 4)));
1985	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1986	  I2O_EXEC_SYS_TAB_SET);
1987	/*
1988	 *	Consult the LCT to determine the number of device entries
1989	 * to reserve space for.
1990	 *	Since this code is reused in several systems, it is more
1991	 * efficient to use a shift operation rather than a divide by
1992	 * sizeof(u_int32_t).
1993	 */
1994	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1995	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1996	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1997	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1998	++sg;
1999	for (ha = Asr_softc; ha; ha = ha->ha_next) {
2000		SG(sg, 0,
2001		  ((ha->ha_next)
2002		    ? (I2O_SGL_FLAGS_DIR)
2003		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2004		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2005		++sg;
2006	}
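	/*
	 * The two zero-length elements that follow appear to stand in for
	 * the private memory and I/O space descriptors that ExecSysTabSet
	 * expects after the system table itself.
	 */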
2007	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2008	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2009	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2010	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2011	free(Message_Ptr, M_TEMP);
2012	free(SystemTable, M_TEMP);
2013	return (retVal);
2014} /* ASR_setSysTab */
2015
2016static int
2017ASR_acquireHrt(Asr_softc_t *sc)
2018{
2019	I2O_EXEC_HRT_GET_MESSAGE	Message;
2020	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
2021	struct {
2022		I2O_HRT	      Header;
2023		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2024	}				Hrt;
2025	u_int8_t			NumberOfEntries;
2026	PI2O_HRT_ENTRY			Entry;
2027
2028	bzero(&Hrt, sizeof (Hrt));
2029	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
2030	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2031	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2032	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2033	  (I2O_VERSION_11
2034	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2035		   / sizeof(U32)) << 4)));
2036	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2037	  I2O_EXEC_HRT_GET);
2038
2039	/*
2040	 *  Set up the buffers as scatter gather elements.
2041	 */
2042	SG(&(Message_Ptr->SGL), 0,
2043	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2044	  &Hrt, sizeof(Hrt));
2045	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2046		return (ENODEV);
2047	}
2048	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2049	  > (MAX_CHANNEL + 1)) {
2050		NumberOfEntries = MAX_CHANNEL + 1;
2051	}
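	/*
	 * Each HRT entry's AdapterID appears to pack the controller TID in
	 * its low 12 bits and the bus (channel) number in the upper bits;
	 * the loop below uses it to record the bus of each LCT entry.
	 */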
2052	for (Entry = Hrt.Header.HRTEntry;
2053	  NumberOfEntries != 0;
2054	  ++Entry, --NumberOfEntries) {
2055		PI2O_LCT_ENTRY Device;
2056
2057		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2058		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2059		  ++Device) {
2060			if (I2O_LCT_ENTRY_getLocalTID(Device)
2061			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2062				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2063				  Entry) >> 16;
2064				if ((Device->le_bus > sc->ha_MaxBus)
2065				 && (Device->le_bus <= MAX_CHANNEL)) {
2066					sc->ha_MaxBus = Device->le_bus;
2067				}
2068			}
2069		}
2070	}
2071	return (0);
2072} /* ASR_acquireHrt */
2073
2074/*
2075 *	Enable the adapter.
2076 */
2077static int
2078ASR_enableSys(Asr_softc_t *sc)
2079{
2080	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2081	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2082
2083	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2084	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2085	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2086	  I2O_EXEC_SYS_ENABLE);
2087	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2088} /* ASR_enableSys */
2089
2090/*
2091 *	Perform the stages necessary to initialize the adapter
2092 */
2093static int
2094ASR_init(Asr_softc_t *sc)
2095{
2096	return ((ASR_initOutBound(sc) == 0)
2097	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2098	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2099} /* ASR_init */
2100
2101/*
2102 *	Send a Synchronize Cache command to the target device.
2103 */
2104static void
2105ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2106{
2107	tid_t TID;
2108
2109	/*
2110	 * We will not synchronize the device when there are outstanding
2111	 * commands issued by the OS (this is due to a locked up device,
2112	 * as the OS normally would flush all outstanding commands before
2113	 * issuing a shutdown or an adapter reset).
2114	 */
2115	if ((sc != NULL)
2116	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2117	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2118	 && (TID != (tid_t)0)) {
2119		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2120		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2121
2122		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2123		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2124		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2125
2126		I2O_MESSAGE_FRAME_setVersionOffset(
2127		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2128		  I2O_VERSION_11
2129		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2130		    - sizeof(I2O_SG_ELEMENT))
2131			/ sizeof(U32)) << 4));
2132		I2O_MESSAGE_FRAME_setMessageSize(
2133		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2134		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2135		  - sizeof(I2O_SG_ELEMENT))
2136			/ sizeof(U32));
2137		I2O_MESSAGE_FRAME_setInitiatorAddress (
2138		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2139		I2O_MESSAGE_FRAME_setFunction(
2140		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2141		I2O_MESSAGE_FRAME_setTargetAddress(
2142		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2143		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2144		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2145		  I2O_SCSI_SCB_EXEC);
2146		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2147		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2148		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2149		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2150		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2151		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2152		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2153		  DPT_ORGANIZATION_ID);
2154		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2155		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2156		Message_Ptr->CDB[1] = (lun << 5);
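		/* SCSI-2 style addressing: the LUN goes in bits 5-7 of CDB byte 1. */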
2157
2158		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2159		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2160		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2161		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2162		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2163
2164		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2165
2166	}
2167}
2168
2169static void
2170ASR_synchronize(Asr_softc_t *sc)
2171{
2172	int bus, target, lun;
2173
2174	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2175		for (target = 0; target <= sc->ha_MaxId; ++target) {
2176			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2177				ASR_sync(sc,bus,target,lun);
2178			}
2179		}
2180	}
2181}
2182
2183/*
2184 *	Reset the HBA, targets and BUS.
2185 *		Currently this resets *all* the SCSI busses.
2186 */
2187static __inline void
2188asr_hbareset(Asr_softc_t *sc)
2189{
2190	ASR_synchronize(sc);
2191	(void)ASR_reset(sc);
2192} /* asr_hbareset */
2193
2194/*
2195 *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2196 * limit and a reduction in error checking (in the pre 4.0 case).
2197 */
2198static int
2199asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2200{
2201	int		rid;
2202	u_int32_t	p, l, s;
2203
2204	/*
2205	 * The I2O specification says we must find the first *memory* mapped BAR
2206	 */
2207	for (rid = 0; rid < 4; rid++) {
2208		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2209		if ((p & 1) == 0) {
2210			break;
2211		}
2212	}
2213	/*
2214	 *	Give up?
2215	 */
2216	if (rid >= 4) {
2217		rid = 0;
2218	}
2219	rid = PCIR_BAR(rid);
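	/*
	 * Standard PCI BAR sizing probe: save the BAR, write all ones, read
	 * it back, mask the low flag bits and negate to recover the decoded
	 * size, then restore the original value. For example, a masked
	 * read-back of 0xFFC00000 gives 0 - 0xFFC00000 = 0x00400000 (4MB).
	 */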
2220	p = pci_read_config(dev, rid, sizeof(p));
2221	pci_write_config(dev, rid, -1, sizeof(p));
2222	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2223	pci_write_config(dev, rid, p, sizeof(p));
2224	if (l > MAX_MAP) {
2225		l = MAX_MAP;
2226	}
2227	/*
2228	 * The 2005S Zero Channel RAID solution is not a perfect PCI
2229	 * citizen. It asks for 4MB on BAR0 and 0MB on BAR1; once
2230	 * enabled, it rewrites the size of BAR0 to 2MB, sets BAR1 to
2231	 * BAR0+2MB and sets its size to 2MB. The IOP registers are
2232	 * accessible via BAR0 and the messaging registers are accessible
2233	 * via BAR1, if the subdevice code is 50 to 59 decimal.
2234	 */
2235	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2236	if (s != 0xA5111044) {
2237		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2238		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2239		 && (ADPTDOMINATOR_SUB_ID_START <= s)
2240		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2241			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2242		}
2243	}
2244	p &= ~15;
2245	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2246	  p, p + l, l, RF_ACTIVE);
2247	if (sc->ha_mem_res == NULL) {
2248		return (0);
2249	}
2250	sc->ha_Base = rman_get_start(sc->ha_mem_res);
2251	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2252	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2253
2254	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2255		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2256			return (0);
2257		}
2258		p = pci_read_config(dev, rid, sizeof(p));
2259		pci_write_config(dev, rid, -1, sizeof(p));
2260		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2261		pci_write_config(dev, rid, p, sizeof(p));
2262		if (l > MAX_MAP) {
2263			l = MAX_MAP;
2264		}
2265		p &= ~15;
2266		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2267		  p, p + l, l, RF_ACTIVE);
2268		if (sc->ha_mes_res == NULL) {
2269			return (0);
2270		}
2271		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2272		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2273	} else {
2274		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2275		sc->ha_frame_btag = sc->ha_i2o_btag;
2276	}
2277	return (1);
2278} /* asr_pci_map_mem */
2279
2280/*
2281 *	A simplified copy of the real pci_map_int with additional
2282 * registration requirements.
2283 */
2284static int
2285asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2286{
2287	int rid = 0;
2288
2289	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2290	  RF_ACTIVE | RF_SHAREABLE);
2291	if (sc->ha_irq_res == NULL) {
2292		return (0);
2293	}
2294	if (bus_setup_intr(dev, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
2295	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2296		return (0);
2297	}
2298	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2299	return (1);
2300} /* asr_pci_map_int */
2301
2302static void
2303asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2304{
2305	Asr_softc_t *sc;
2306
2307	if (error)
2308		return;
2309
2310	sc = (Asr_softc_t *)arg;
2311
2312	/* XXX
2313	 * The status word can be at a 64-bit address, but the existing
2314	 * accessor macros simply cannot manipulate 64-bit addresses.
2315	 */
2316	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr;
2317}
2318
2319static int
2320asr_alloc_dma(Asr_softc_t *sc)
2321{
2322	device_t dev;
2323
2324	dev = sc->ha_dev;
2325
2326	if (bus_dma_tag_create(NULL,			/* parent */
2327			       1, 0,			/* algnmnt, boundary */
2328			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2329			       BUS_SPACE_MAXADDR,	/* highaddr */
2330			       NULL, NULL,		/* filter, filterarg */
2331			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2332			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2333			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2334			       0,			/* flags */
2335			       NULL, NULL,		/* lockfunc, lockarg */
2336			       &sc->ha_parent_dmat)) {
2337		device_printf(dev, "Cannot allocate parent DMA tag\n");
2338		return (ENOMEM);
2339	}
2340
2341	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2342			       1, 0,			/* algnmnt, boundary */
2343			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2344			       BUS_SPACE_MAXADDR,	/* highaddr */
2345			       NULL, NULL,		/* filter, filterarg */
2346			       sizeof(uint32_t),	/* maxsize */
2347			       1,			/* nsegments */
2348			       sizeof(uint32_t),	/* maxsegsize */
2349			       0,			/* flags */
2350			       NULL, NULL,		/* lockfunc, lockarg */
2351			       &sc->ha_status_dmat)) {
2352		device_printf(dev, "Cannot allocate status DMA tag\n");
2353		bus_dma_tag_destroy(sc->ha_parent_dmat);
2354		return (ENOMEM);
2355	}
2356
2357	if (bus_dmamem_alloc(sc->ha_status_dmat, (void **)&sc->ha_status,
2358	    BUS_DMA_NOWAIT, &sc->ha_status_dmamap)) {
2359		device_printf(dev, "Cannot allocate status memory\n");
2360		bus_dma_tag_destroy(sc->ha_status_dmat);
2361		bus_dma_tag_destroy(sc->ha_parent_dmat);
2362		return (ENOMEM);
2363	}
2364	(void)bus_dmamap_load(sc->ha_status_dmat, sc->ha_status_dmamap,
2365	    sc->ha_status, sizeof(sc->ha_status), asr_status_cb, sc, 0);
2366
2367	return (0);
2368}
2369
2370static void
2371asr_release_dma(Asr_softc_t *sc)
2372{
2373
2374	if (sc->ha_status_phys != 0)
2375		bus_dmamap_unload(sc->ha_status_dmat, sc->ha_status_dmamap);
2376	if (sc->ha_status != NULL)
2377		bus_dmamem_free(sc->ha_status_dmat, sc->ha_status,
2378		    sc->ha_status_dmamap);
2379	if (sc->ha_status_dmat != NULL)
2380		bus_dma_tag_destroy(sc->ha_status_dmat);
2381	if (sc->ha_parent_dmat != NULL)
2382		bus_dma_tag_destroy(sc->ha_parent_dmat);
2383}
2384
2385/*
2386 *	Attach the devices, and virtual devices to the driver list.
2387 */
2388static int
2389asr_attach(device_t dev)
2390{
2391	PI2O_EXEC_STATUS_GET_REPLY status;
2392	PI2O_LCT_ENTRY		 Device;
2393	Asr_softc_t		 *sc, **ha;
2394	struct scsi_inquiry_data *iq;
2395	int			 bus, size, unit;
2396	int			 error;
2397
2398	sc = device_get_softc(dev);
2399	unit = device_get_unit(dev);
2400	sc->ha_dev = dev;
2401
2402	if (Asr_softc == NULL) {
2403		/*
2404		 *	Fixup the OS revision as saved in the dptsig for the
2405		 *	engine (dptioctl.h) to pick up.
2406		 */
2407		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2408	}
2409	/*
2410	 *	Initialize the software structure
2411	 */
2412	LIST_INIT(&(sc->ha_ccb));
2413	/* Link us into the HA list */
2414	for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2415	*(ha) = sc;
2416
2417	/*
2418	 *	This is the real McCoy!
2419	 */
2420	if (!asr_pci_map_mem(dev, sc)) {
2421		device_printf(dev, "could not map memory\n");
2422		return(ENXIO);
2423	}
2424	/* Enable if not formerly enabled */
2425	pci_write_config(dev, PCIR_COMMAND,
2426	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2427	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2428
2429	sc->ha_pciBusNum = pci_get_bus(dev);
2430	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2431
2432	if ((error = asr_alloc_dma(sc)) != 0)
2433		return (error);
2434
2435	/* Check whether the device is there. */
2436	if (ASR_resetIOP(sc) == 0) {
2437		device_printf(dev, "Cannot reset adapter\n");
2438		asr_release_dma(sc);
2439		return (EIO);
2440	}
2441	if ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc(
2442	    sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_NOWAIT)) == NULL) {
2443		device_printf(dev, "Cannot allocate memory\n");
2444		asr_release_dma(sc);
2445		return (ENOMEM);
2446	}
2447	if (ASR_getStatus(sc, status) == NULL) {
2448		device_printf(dev, "could not initialize hardware\n");
2449		free(status, M_TEMP);
2450		asr_release_dma(sc);
2451		return(ENODEV);
2452	}
2453	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2454	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2455	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2456	sc->ha_SystemTable.IopState = status->IopState;
2457	sc->ha_SystemTable.MessengerType = status->MessengerType;
2458	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2459	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2460	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2461
2462	if (!asr_pci_map_int(dev, (void *)sc)) {
2463		device_printf(dev, "could not map interrupt\n");
2464		asr_release_dma(sc);
2465		return(ENXIO);
2466	}
2467
2468	/* Adjust the maximum inbound count */
2469	if (((sc->ha_QueueSize =
2470	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2471	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2472		sc->ha_QueueSize = MAX_INBOUND;
2473	}
2474
2475	/* Adjust the maximum outbound count */
2476	if (((sc->ha_Msgs_Count =
2477	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2478	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2479		sc->ha_Msgs_Count = MAX_OUTBOUND;
2480	}
2481	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2482		sc->ha_Msgs_Count = sc->ha_QueueSize;
2483	}
2484
2485	/* Adjust the maximum SG size to adapter */
2486	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2487	    2)) > MAX_INBOUND_SIZE) {
2488		size = MAX_INBOUND_SIZE;
2489	}
2490	free(status, M_TEMP);
2491	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2492	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
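	/*
	 * ha_SgSize is the number of simple SG elements that fit in an
	 * inbound frame once the fixed portion of the execute message
	 * (less its SG placeholder) is accounted for.
	 */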
2493
2494	/*
2495	 *	Only do a bus/HBA reset on the first time through. On this
2496	 * first time through, we do not send a flush to the devices.
2497	 */
2498	if (ASR_init(sc) == 0) {
2499		struct BufferInfo {
2500			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2501			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2502			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2503		} Buffer;
2504		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2505#define FW_DEBUG_BLED_OFFSET 8
2506
2507		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2508		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2509		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2510			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2511			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2512		}
2513		if (ASR_acquireLct(sc) == 0) {
2514			(void)ASR_acquireHrt(sc);
2515		}
2516	} else {
2517		device_printf(dev, "failed to initialize\n");
2518		asr_release_dma(sc);
2519		return(ENXIO);
2520	}
2521	/*
2522	 *	Add in additional probe responses for more channels. We
2523	 * are reusing the variable `target' for a channel loop counter.
2524	 * Done here because we need both the acquireLct and
2525	 * acquireHrt data.
2526	 */
2527	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2528	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2529		if (Device->le_type == I2O_UNKNOWN) {
2530			continue;
2531		}
2532		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2533			if (Device->le_target > sc->ha_MaxId) {
2534				sc->ha_MaxId = Device->le_target;
2535			}
2536			if (Device->le_lun > sc->ha_MaxLun) {
2537				sc->ha_MaxLun = Device->le_lun;
2538			}
2539		}
2540		if (((Device->le_type & I2O_PORT) != 0)
2541		 && (Device->le_bus <= MAX_CHANNEL)) {
2542			/* Do not increase MaxId for efficiency */
2543			sc->ha_adapter_target[Device->le_bus] =
2544			    Device->le_target;
2545		}
2546	}
2547
2548	/*
2549	 *	Print the HBA model number as inquired from the card.
2550	 */
2551
2552	device_printf(dev, " ");
2553
2554	if ((iq = (struct scsi_inquiry_data *)malloc(
2555	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2556	    NULL) {
2557		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2558		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2559		int					posted = 0;
2560
2561		Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2562		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2563		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2564
2565		I2O_MESSAGE_FRAME_setVersionOffset(
2566		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2567		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2568		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2569		I2O_MESSAGE_FRAME_setMessageSize(
2570		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2571		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2572		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2573		    sizeof(U32));
2574		I2O_MESSAGE_FRAME_setInitiatorAddress(
2575		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2576		I2O_MESSAGE_FRAME_setFunction(
2577		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2578		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2579		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2580		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2581		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2582		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2583		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2584		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2585		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2586		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2587		    DPT_ORGANIZATION_ID);
2588		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2589		Message_Ptr->CDB[0] = INQUIRY;
2590		Message_Ptr->CDB[4] =
2591		    (unsigned char)sizeof(struct scsi_inquiry_data);
2592		if (Message_Ptr->CDB[4] == 0) {
2593			Message_Ptr->CDB[4] = 255;
2594		}
2595
2596		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2597		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2598		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2599		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2600		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2601
2602		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2603		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2604		  sizeof(struct scsi_inquiry_data));
2605		SG(&(Message_Ptr->SGL), 0,
2606		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2607		  iq, sizeof(struct scsi_inquiry_data));
2608		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2609
2610		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2611			printf (" ");
2612			ASR_prstring (iq->vendor, 8);
2613			++posted;
2614		}
2615		if (iq->product[0] && (iq->product[0] != ' ')) {
2616			printf (" ");
2617			ASR_prstring (iq->product, 16);
2618			++posted;
2619		}
2620		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2621			printf (" FW Rev. ");
2622			ASR_prstring (iq->revision, 4);
2623			++posted;
2624		}
2625		free(iq, M_TEMP);
2626		if (posted) {
2627			printf (",");
2628		}
2629	}
2630	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2631	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2632
2633	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2634		struct cam_devq	  * devq;
2635		int		    QueueSize = sc->ha_QueueSize;
2636
2637		if (QueueSize > MAX_INBOUND) {
2638			QueueSize = MAX_INBOUND;
2639		}
2640
2641		/*
2642		 *	Create the device queue for our SIM(s).
2643		 */
2644		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2645			continue;
2646		}
2647
2648		/*
2649		 *	Construct our first channel SIM entry
2650		 */
2651		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2652						unit, 1, QueueSize, devq);
2653		if (sc->ha_sim[bus] == NULL) {
2654			continue;
2655		}
2656
2657		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS) {
2658			cam_sim_free(sc->ha_sim[bus],
2659			  /*free_devq*/TRUE);
2660			sc->ha_sim[bus] = NULL;
2661			continue;
2662		}
2663
2664		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2665		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2666		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2667			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2668			cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE);
2669			sc->ha_sim[bus] = NULL;
2670			continue;
2671		}
2672	}
2673
2674	/*
2675	 *	Generate the device node information
2676	 */
2677	sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
2678			       "asr%d", unit);
2679	if (sc->ha_devt != NULL)
2680		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2681	sc->ha_devt->si_drv1 = sc;
2682	return(0);
2683} /* asr_attach */
2684
2685static void
2686asr_poll(struct cam_sim *sim)
2687{
2688	asr_intr(cam_sim_softc(sim));
2689} /* asr_poll */
2690
2691static void
2692asr_action(struct cam_sim *sim, union ccb  *ccb)
2693{
2694	struct Asr_softc *sc;
2695
2696	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2697			 ccb->ccb_h.func_code);
2698
2699	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2700
2701	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2702
2703	switch (ccb->ccb_h.func_code) {
2704
2705	/* Common cases first */
2706	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2707	{
2708		struct Message {
2709			char M[MAX_INBOUND_SIZE];
2710		} Message;
2711		PI2O_MESSAGE_FRAME   Message_Ptr;
2712
2713		/* Reject incoming commands while we are resetting the card */
2714		if (sc->ha_in_reset != HA_OPERATIONAL) {
2715			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2716			if (sc->ha_in_reset >= HA_OFF_LINE) {
2717				/* HBA is now off-line */
2718				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2719			} else {
2720				/* HBA currently resetting, try again later. */
2721				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2722			}
2723			debug_asr_cmd_printf (" e\n");
2724			xpt_done(ccb);
2725			debug_asr_cmd_printf (" q\n");
2726			break;
2727		}
2728		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2729			printf(
2730			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2731			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2732			  ccb->csio.cdb_io.cdb_bytes[0],
2733			  cam_sim_bus(sim),
2734			  ccb->ccb_h.target_id,
2735			  ccb->ccb_h.target_lun);
2736		}
2737		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2738				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2739				     ccb->ccb_h.target_lun);
2740		debug_asr_dump_ccb(ccb);
2741
2742		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2743		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2744			debug_asr_cmd2_printf ("TID=%x:\n",
2745			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2746			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2747			debug_asr_cmd2_dump_message(Message_Ptr);
2748			debug_asr_cmd1_printf (" q");
2749
2750			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2751				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2752				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2753				debug_asr_cmd_printf (" E\n");
2754				xpt_done(ccb);
2755			}
2756			debug_asr_cmd_printf(" Q\n");
2757			break;
2758		}
2759		/*
2760		 *	We will get here if there is no valid TID for the device
2761		 * referenced in the scsi command packet.
2762		 */
2763		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2764		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2765		debug_asr_cmd_printf (" B\n");
2766		xpt_done(ccb);
2767		break;
2768	}
2769
2770	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
2771		/* Reset HBA device ... */
2772		asr_hbareset (sc);
2773		ccb->ccb_h.status = CAM_REQ_CMP;
2774		xpt_done(ccb);
2775		break;
2776
2777#if (defined(REPORT_LUNS))
2778	case REPORT_LUNS:
2779#endif
2780	case XPT_ABORT:			/* Abort the specified CCB */
2781		/* XXX Implement */
2782		ccb->ccb_h.status = CAM_REQ_INVALID;
2783		xpt_done(ccb);
2784		break;
2785
2786	case XPT_SET_TRAN_SETTINGS:
2787		/* XXX Implement */
2788		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2789		xpt_done(ccb);
2790		break;
2791
2792	case XPT_GET_TRAN_SETTINGS:
2793	/* Get default/user set transfer settings for the target */
2794	{
2795		struct	ccb_trans_settings *cts;
2796		u_int	target_mask;
2797
2798		cts = &(ccb->cts);
2799		target_mask = 0x01 << ccb->ccb_h.target_id;
2800		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
2801			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
2802			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2803			cts->sync_period = 6; /* 40MHz */
2804			cts->sync_offset = 15;
2805
2806			cts->valid = CCB_TRANS_SYNC_RATE_VALID
2807				   | CCB_TRANS_SYNC_OFFSET_VALID
2808				   | CCB_TRANS_BUS_WIDTH_VALID
2809				   | CCB_TRANS_DISC_VALID
2810				   | CCB_TRANS_TQ_VALID;
2811			ccb->ccb_h.status = CAM_REQ_CMP;
2812		} else {
2813			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2814		}
2815		xpt_done(ccb);
2816		break;
2817	}
2818
2819	case XPT_CALC_GEOMETRY:
2820	{
2821		struct	  ccb_calc_geometry *ccg;
2822		u_int32_t size_mb;
2823		u_int32_t secs_per_cylinder;
2824
2825		ccg = &(ccb->ccg);
2826		size_mb = ccg->volume_size
2827			/ ((1024L * 1024L) / ccg->block_size);
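		/*
		 * Synthesize a BIOS-style extended-translation geometry from
		 * the volume size: larger volumes get more heads and sectors
		 * per track so that the cylinder count stays representable.
		 */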
2828
2829		if (size_mb > 4096) {
2830			ccg->heads = 255;
2831			ccg->secs_per_track = 63;
2832		} else if (size_mb > 2048) {
2833			ccg->heads = 128;
2834			ccg->secs_per_track = 63;
2835		} else if (size_mb > 1024) {
2836			ccg->heads = 65;
2837			ccg->secs_per_track = 63;
2838		} else {
2839			ccg->heads = 64;
2840			ccg->secs_per_track = 32;
2841		}
2842		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2843		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2844		ccb->ccb_h.status = CAM_REQ_CMP;
2845		xpt_done(ccb);
2846		break;
2847	}
2848
2849	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2850		ASR_resetBus (sc, cam_sim_bus(sim));
2851		ccb->ccb_h.status = CAM_REQ_CMP;
2852		xpt_done(ccb);
2853		break;
2854
2855	case XPT_TERM_IO:		/* Terminate the I/O process */
2856		/* XXX Implement */
2857		ccb->ccb_h.status = CAM_REQ_INVALID;
2858		xpt_done(ccb);
2859		break;
2860
2861	case XPT_PATH_INQ:		/* Path routing inquiry */
2862	{
2863		struct ccb_pathinq *cpi = &(ccb->cpi);
2864
2865		cpi->version_num = 1; /* XXX??? */
2866		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2867		cpi->target_sprt = 0;
2868		/* Not necessary to reset bus, done by HDM initialization */
2869		cpi->hba_misc = PIM_NOBUSRESET;
2870		cpi->hba_eng_cnt = 0;
2871		cpi->max_target = sc->ha_MaxId;
2872		cpi->max_lun = sc->ha_MaxLun;
2873		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2874		cpi->bus_id = cam_sim_bus(sim);
2875		cpi->base_transfer_speed = 3300;
2876		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2877		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2878		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2879		cpi->unit_number = cam_sim_unit(sim);
2880		cpi->ccb_h.status = CAM_REQ_CMP;
2881		xpt_done(ccb);
2882		break;
2883	}
2884	default:
2885		ccb->ccb_h.status = CAM_REQ_INVALID;
2886		xpt_done(ccb);
2887		break;
2888	}
2889} /* asr_action */
2890
2891/*
2892 * Handle processing of current CCB as pointed to by the Status.
2893 */
2894static int
2895asr_intr(Asr_softc_t *sc)
2896{
2897	int processed;
2898
2899	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2900	    processed = 1) {
2901		union asr_ccb			   *ccb;
2902		u_int				    dsc;
2903		U32				    ReplyOffset;
2904		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2905
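		/*
		 * The outbound FIFO is read a second time when the first
		 * read returns EMPTY_QUEUE; this appears to guard against a
		 * transiently empty indication from the hardware.
		 */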
2906		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2907		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2908			break;
2909		}
2910		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2911		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2912		/*
2913		 * We do not need any (optional byteswapping) method access to
2914		 * the Initiator context field.
2915		 */
2916		ccb = (union asr_ccb *)(long)
2917		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2918		    &(Reply->StdReplyFrame.StdMessageFrame));
2919		if (I2O_MESSAGE_FRAME_getMsgFlags(
2920		  &(Reply->StdReplyFrame.StdMessageFrame))
2921		  & I2O_MESSAGE_FLAGS_FAIL) {
2922			I2O_UTIL_NOP_MESSAGE	Message;
2923			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2924			U32			MessageOffset;
2925
2926			MessageOffset = (u_long)
2927			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2928			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2929			/*
2930			 *  Get the Original Message Frame's address, and get
2931			 * its Transaction Context into our space. (Currently
2932			 * unused at original authorship, but better to be
2933			 * safe than sorry). Straight copy means that we
2934			 * need not concern ourselves with the (optional
2935			 * byteswapping) method access.
2936			 */
2937			Reply->StdReplyFrame.TransactionContext =
2938			    bus_space_read_4(sc->ha_frame_btag,
2939			    sc->ha_frame_bhandle, MessageOffset +
2940			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2941			    TransactionContext));
2942			/*
2943			 *	For 64 bit machines, we need to reconstruct the
2944			 * 64 bit context.
2945			 */
2946			ccb = (union asr_ccb *)(long)
2947			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2948			    &(Reply->StdReplyFrame.StdMessageFrame));
2949			/*
2950			 * Unique error code for command failure.
2951			 */
2952			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2953			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2954			/*
2955			 *  Modify the message frame to contain a NOP and
2956			 * re-issue it to the controller.
2957			 */
2958			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2959			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2960#if (I2O_UTIL_NOP != 0)
2961				I2O_MESSAGE_FRAME_setFunction (
2962				  &(Message_Ptr->StdMessageFrame),
2963				  I2O_UTIL_NOP);
2964#endif
2965			/*
2966			 *  Copy the packet out to the Original Message
2967			 */
2968			asr_set_frame(sc, Message_Ptr, MessageOffset,
2969				      sizeof(I2O_UTIL_NOP_MESSAGE));
2970			/*
2971			 *  Issue the NOP
2972			 */
2973			asr_set_ToFIFO(sc, MessageOffset);
2974		}
2975
2976		/*
2977		 *	Asynchronous command with no return requirements,
2978		 * and a generic handler for immunity against odd error
2979		 * returns from the adapter.
2980		 */
2981		if (ccb == NULL) {
2982			/*
2983			 * Return Reply so that it can be used for the
2984			 * next command
2985			 */
2986			asr_set_FromFIFO(sc, ReplyOffset);
2987			continue;
2988		}
2989
2990		/* Welease Wadjah! (and stop timeouts) */
2991		ASR_ccbRemove (sc, ccb);
2992
2993		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2994		    &(Reply->StdReplyFrame));
2995		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2996		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2997		switch (dsc) {
2998
2999		case I2O_SCSI_DSC_SUCCESS:
3000			ccb->ccb_h.status |= CAM_REQ_CMP;
3001			break;
3002
3003		case I2O_SCSI_DSC_CHECK_CONDITION:
3004			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
3005			    CAM_AUTOSNS_VALID;
3006			break;
3007
3008		case I2O_SCSI_DSC_BUSY:
3009			/* FALLTHRU */
3010		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3011			/* FALLTHRU */
3012		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3013			/* FALLTHRU */
3014		case I2O_SCSI_HBA_DSC_BUS_BUSY:
3015			ccb->ccb_h.status |= CAM_SCSI_BUSY;
3016			break;
3017
3018		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3019			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3020			break;
3021
3022		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3023			/* FALLTHRU */
3024		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3025			/* FALLTHRU */
3026		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3027			/* FALLTHRU */
3028		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3029			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3030			break;
3031
3032		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3033			/* FALLTHRU */
3034		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3035			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3036			break;
3037
3038		default:
3039			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3040			break;
3041		}
3042		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3043			ccb->csio.resid -=
3044			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3045			    Reply);
3046		}
3047
3048		/* Sense data in reply packet */
3049		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3050			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3051
3052			if (size) {
3053				if (size > sizeof(ccb->csio.sense_data)) {
3054					size = sizeof(ccb->csio.sense_data);
3055				}
3056				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3057					size = I2O_SCSI_SENSE_DATA_SZ;
3058				}
3059				if ((ccb->csio.sense_len)
3060				 && (size > ccb->csio.sense_len)) {
3061					size = ccb->csio.sense_len;
3062				}
3063				bcopy(Reply->SenseData,
3064				      &(ccb->csio.sense_data), size);
3065			}
3066		}
3067
3068		/*
3069		 * Return Reply so that it can be used for the next command
3070		 * since we have no more need for it now
3071		 */
3072		asr_set_FromFIFO(sc, ReplyOffset);
3073
3074		if (ccb->ccb_h.path) {
3075			xpt_done ((union ccb *)ccb);
3076		} else {
3077			wakeup (ccb);
3078		}
3079	}
3080	return (processed);
3081} /* asr_intr */
3082
3083#undef QueueSize	/* Grrrr */
3084#undef SG_Size		/* Grrrr */
3085
3086/*
3087 *	Meant to be included at the bottom of asr.c !!!
3088 */
3089
3090/*
3091 *	Included here as hard coded. Done because other necessary include
3092 *	files utilize C++ comment structures which make them a nuisance to
3093 *	include here just to pick up these three typedefs.
3094 */
3095typedef U32   DPT_TAG_T;
3096typedef U32   DPT_MSG_T;
3097typedef U32   DPT_RTN_T;
3098
3099#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3100#include	"dev/asr/osd_unix.h"
3101
3102#define	asr_unit(dev)	  minor(dev)
3103
3104static u_int8_t ASR_ctlr_held;
3105
3106static int
3107asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
3108{
3109	int		 s;
3110	int		 error;
3111
3112	if (dev->si_drv1 == NULL) {
3113		return (ENODEV);
3114	}
3115	s = splcam ();
3116	if (ASR_ctlr_held) {
3117		error = EBUSY;
3118	} else if ((error = suser(td)) == 0) {
3119		++ASR_ctlr_held;
3120	}
3121	splx(s);
3122	return (error);
3123} /* asr_open */
3124
3125static int
3126asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
3127{
3128
3129	ASR_ctlr_held = 0;
3130	return (0);
3131} /* asr_close */
3132
3133
3134/*-------------------------------------------------------------------------*/
3135/*		      Function ASR_queue_i				   */
3136/*-------------------------------------------------------------------------*/
3137/* The Parameters Passed To This Function Are :				   */
3138/*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3139/*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3140/*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3141/*									   */
3142/* This Function Will Take The User Request Packet And Convert It To An	   */
3143/* I2O MSG And Send It Off To The Adapter.				   */
3144/*									   */
3145/* Return : 0 For OK, Error Code Otherwise				   */
3146/*-------------------------------------------------------------------------*/
3147static int
3148ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
3149{
3150	union asr_ccb				   * ccb;
3151	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
3152	PI2O_MESSAGE_FRAME			     Message_Ptr;
3153	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
3154	int					     MessageSizeInBytes;
3155	int					     ReplySizeInBytes;
3156	int					     error;
3157	int					     s;
3158	/* Scatter Gather buffer list */
3159	struct ioctlSgList_S {
3160		SLIST_ENTRY(ioctlSgList_S) link;
3161		caddr_t			   UserSpace;
3162		I2O_FLAGS_COUNT		   FlagsCount;
3163		char			   KernelSpace[sizeof(long)];
3164	}					   * elm;
3165	/* Generates a `first' entry */
3166	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3167
3168	if (ASR_getBlinkLedCode(sc)) {
3169		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3170		  ASR_getBlinkLedCode(sc));
3171		return (EIO);
3172	}
3173	/* Copy in the message into a local allocation */
3174	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3175	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3176		debug_usr_cmd_printf (
3177		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3178		return (ENOMEM);
3179	}
3180	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3181	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3182		free(Message_Ptr, M_TEMP);
3183		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3184		return (error);
3185	}
3186	/* Acquire information to determine type of packet */
3187	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3188	/* The offset of the reply information within the user packet */
3189	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3190	  + MessageSizeInBytes);
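	/*
	 * The user packet is laid out as the inbound message frame followed
	 * immediately by the area into which the reply frame is copied out.
	 */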
3191
3192	/* Check if the message is a synchronous initialization command */
3193	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3194	free(Message_Ptr, M_TEMP);
3195	switch (s) {
3196
3197	case I2O_EXEC_IOP_RESET:
3198	{	U32 status;
3199
3200		status = ASR_resetIOP(sc);
3201		ReplySizeInBytes = sizeof(status);
3202		debug_usr_cmd_printf ("resetIOP done\n");
3203		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3204		  ReplySizeInBytes));
3205	}
3206
3207	case I2O_EXEC_STATUS_GET:
3208	{	I2O_EXEC_STATUS_GET_REPLY status;
3209
3210		if (ASR_getStatus(sc, &status) == NULL) {
3211			debug_usr_cmd_printf ("getStatus failed\n");
3212			return (ENXIO);
3213		}
3214		ReplySizeInBytes = sizeof(status);
3215		debug_usr_cmd_printf ("getStatus done\n");
3216		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3217		  ReplySizeInBytes));
3218	}
3219
3220	case I2O_EXEC_OUTBOUND_INIT:
3221	{	U32 status;
3222
3223		status = ASR_initOutBound(sc);
3224		ReplySizeInBytes = sizeof(status);
3225		debug_usr_cmd_printf ("initOutBound done\n");
3226		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3227		  ReplySizeInBytes));
3228	}
3229	}
3230
3231	/* Determine if the message size is valid */
3232	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3233	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3234		debug_usr_cmd_printf ("Packet size %d incorrect\n",
3235		  MessageSizeInBytes);
3236		return (EINVAL);
3237	}
3238
3239	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3240	  M_TEMP, M_WAITOK)) == NULL) {
3241		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3242		  MessageSizeInBytes);
3243		return (ENOMEM);
3244	}
3245	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3246	  MessageSizeInBytes)) != 0) {
3247		free(Message_Ptr, M_TEMP);
3248		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3249		  MessageSizeInBytes, error);
3250		return (error);
3251	}
3252
3253	/* Check the size of the reply frame, and start constructing */
3254
3255	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3256	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3257		free(Message_Ptr, M_TEMP);
3258		debug_usr_cmd_printf (
3259		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3260		return (ENOMEM);
3261	}
3262	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3263	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3264		free(Reply_Ptr, M_TEMP);
3265		free(Message_Ptr, M_TEMP);
3266		debug_usr_cmd_printf (
3267		  "Failed to copy in reply frame, errno=%d\n",
3268		  error);
3269		return (error);
3270	}
3271	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3272	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3273	free(Reply_Ptr, M_TEMP);
3274	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3275		free(Message_Ptr, M_TEMP);
3276		debug_usr_cmd_printf (
3277		  "Reply frame size %d too small, errno=%d\n",
3278		  ReplySizeInBytes, error);
3279		return (EINVAL);
3280	}
3281
3282	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3283	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3284	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3285	  M_TEMP, M_WAITOK)) == NULL) {
3286		free(Message_Ptr, M_TEMP);
3287		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3288		  ReplySizeInBytes);
3289		return (ENOMEM);
3290	}
3291	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3292	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3293	  = Message_Ptr->InitiatorContext;
3294	Reply_Ptr->StdReplyFrame.TransactionContext
3295	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3296	I2O_MESSAGE_FRAME_setMsgFlags(
3297	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3298	  I2O_MESSAGE_FRAME_getMsgFlags(
3299	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3300	      | I2O_MESSAGE_FLAGS_REPLY);
3301
3302	/* Check if the message is a special case command */
3303	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3304	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3305		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3306		  Message_Ptr) & 0xF0) >> 2)) {
3307			free(Message_Ptr, M_TEMP);
3308			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3309			  &(Reply_Ptr->StdReplyFrame),
3310			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
3311			I2O_MESSAGE_FRAME_setMessageSize(
3312			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3313			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3314			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3315			  ReplySizeInBytes);
3316			free(Reply_Ptr, M_TEMP);
3317			return (error);
3318		}
3319	}
3320
3321	/* Deal in the general case */
3322	/* First allocate and optionally copy in each scatter gather element */
3323	SLIST_INIT(&sgList);
3324	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3325		PI2O_SGE_SIMPLE_ELEMENT sg;
3326
3327		/*
3328		 *	Since this code is reused in several systems, it is
3329		 * more efficient to use a shift operation rather than a
3330		 * divide by sizeof(u_int32_t).
3331		 */
3332		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3333		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3334		    >> 2));
3335		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3336		  + MessageSizeInBytes)) {
3337			caddr_t v;
3338			int	len;
3339
3340			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3341			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3342				error = EINVAL;
3343				break;
3344			}
3345			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3346			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3347			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3348			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3349				Message_Ptr) & 0xF0) >> 2)),
3350			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3351
3352			if ((elm = (struct ioctlSgList_S *)malloc (
3353			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3354			  M_TEMP, M_WAITOK)) == NULL) {
3355				debug_usr_cmd_printf (
3356				  "Failed to allocate SG[%d]\n", len);
3357				error = ENOMEM;
3358				break;
3359			}
3360			SLIST_INSERT_HEAD(&sgList, elm, link);
3361			elm->FlagsCount = sg->FlagsCount;
3362			elm->UserSpace = (caddr_t)
3363			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3364			v = elm->KernelSpace;
3365			/* Copy in outgoing data (DIR bit could be invalid) */
3366			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3367			  != 0) {
3368				break;
3369			}
3370			/*
3371			 *	If the buffer is not contiguous, let us
3372			 * break up the scatter/gather entries.
3373			 */
3374			while ((len > 0)
3375			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3376			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3377				int next, base, span;
3378
3379				span = 0;
3380				next = base = KVTOPHYS(v);
3381				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3382				  base);
3383
3384				/* How far can we go physically contiguously */
3385				while ((len > 0) && (base == next)) {
3386					int size;
3387
3388					next = trunc_page(base) + PAGE_SIZE;
3389					size = next - base;
3390					if (size > len) {
3391						size = len;
3392					}
3393					span += size;
3394					v += size;
3395					len -= size;
3396					base = KVTOPHYS(v);
3397				}
3398
3399				/* Construct the Flags */
3400				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3401				  span);
3402				{
3403					int flags = I2O_FLAGS_COUNT_getFlags(
3404					  &(elm->FlagsCount));
3405					/* Any remaining length? */
3406					if (len > 0) {
3407					    flags &=
3408						~(I2O_SGL_FLAGS_END_OF_BUFFER
3409						 | I2O_SGL_FLAGS_LAST_ELEMENT);
3410					}
3411					I2O_FLAGS_COUNT_setFlags(
3412					  &(sg->FlagsCount), flags);
3413				}
3414
3415				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3416				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
3417				    ((char *)Message_Ptr
3418				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3419					Message_Ptr) & 0xF0) >> 2)),
3420				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3421				  span);
3422				if (len <= 0) {
3423					break;
3424				}
3425
3426				/*
3427				 * Incrementing requires resizing of the
3428				 * packet, and moving up the existing SG
3429				 * elements.
3430				 */
3431				++sg;
3432				MessageSizeInBytes += sizeof(*sg);
3433				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3434				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3435				  + (sizeof(*sg) / sizeof(U32)));
3436				{
3437					PI2O_MESSAGE_FRAME NewMessage_Ptr;
3438
3439					if ((NewMessage_Ptr
3440					  = (PI2O_MESSAGE_FRAME)
3441					    malloc (MessageSizeInBytes,
3442					     M_TEMP, M_WAITOK)) == NULL) {
3443						debug_usr_cmd_printf (
3444						  "Failed to acquire frame[%d] memory\n",
3445						  MessageSizeInBytes);
3446						error = ENOMEM;
3447						break;
3448					}
3449					span = ((caddr_t)sg)
3450					     - (caddr_t)Message_Ptr;
3451					bcopy(Message_Ptr,NewMessage_Ptr, span);
3452					bcopy((caddr_t)(sg-1),
3453					  ((caddr_t)NewMessage_Ptr) + span,
3454					  MessageSizeInBytes - span);
3455					free(Message_Ptr, M_TEMP);
3456					sg = (PI2O_SGE_SIMPLE_ELEMENT)
3457					  (((caddr_t)NewMessage_Ptr) + span);
3458					Message_Ptr = NewMessage_Ptr;
3459				}
3460			}
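		/* Stop on error or after the last element of the SG list */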
3461			if ((error)
3462			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3463			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3464				break;
3465			}
3466			++sg;
3467		}
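	/* On failure, release the bounce buffers and message frames */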
3468		if (error) {
3469			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3470				SLIST_REMOVE_HEAD(&sgList, link);
3471				free(elm, M_TEMP);
3472			}
3473			free(Reply_Ptr, M_TEMP);
3474			free(Message_Ptr, M_TEMP);
3475			return (error);
3476		}
3477	}
3478
3479	debug_usr_cmd_printf ("Inbound: ");
3480	debug_usr_cmd_dump_message(Message_Ptr);
3481
3482	/* Send the command */
3483	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3484		/* Free up in-kernel buffers */
3485		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3486			SLIST_REMOVE_HEAD(&sgList, link);
3487			free(elm, M_TEMP);
3488		}
3489		free(Reply_Ptr, M_TEMP);
3490		free(Message_Ptr, M_TEMP);
3491		return (ENOMEM);
3492	}
3493
3494	/*
3495	 * We do not need any (optional byteswapping) method access to
3496	 * the Initiator context field.
3497	 */
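	/* Stash the ccb pointer there so the completion path can locate this request */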
3498	I2O_MESSAGE_FRAME_setInitiatorContext64(
3499	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3500
3501	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3502
3503	free(Message_Ptr, M_TEMP);
3504
3505	/*
3506	 * Wait for the board to report a finished instruction.
3507	 */
3508	s = splcam();
3509	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3510		if (ASR_getBlinkLedCode(sc)) {
3511			/* Reset Adapter */
3512			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3513			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3514			  ASR_getBlinkLedCode(sc));
3515			if (ASR_reset (sc) == ENXIO) {
3516				/* Command Cleanup */
3517				ASR_ccbRemove(sc, ccb);
3518			}
3519			splx(s);
3520			/* Free up in-kernel buffers */
3521			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3522				SLIST_REMOVE_HEAD(&sgList, link);
3523				free(elm, M_TEMP);
3524			}
3525			free(Reply_Ptr, M_TEMP);
3526			asr_free_ccb(ccb);
3527			return (EIO);
3528		}
3529		/* Check every second for BlinkLed */
3530		/* There is no PRICAM priority; PRIBIO works well enough here */
3531		tsleep(ccb, PRIBIO, "asr", hz);
3532	}
3533	splx(s);
3534
3535	debug_usr_cmd_printf ("Outbound: ");
3536	debug_usr_cmd_dump_message(Reply_Ptr);
3537
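	/* Flag failure in DetailedStatusCode if the CAM status is not CAM_REQ_CMP */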
3538	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3539	  &(Reply_Ptr->StdReplyFrame),
3540	  (ccb->ccb_h.status != CAM_REQ_CMP));
3541
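	/* Fill in the transfer count if the caller's reply buffer reaches that field */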
3542	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3543	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3544		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3545		  ccb->csio.dxfer_len - ccb->csio.resid);
3546	}
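	/* Copy back any autosense data that fits in the caller's reply buffer */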
3547	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3548	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3549	 - I2O_SCSI_SENSE_DATA_SZ))) {
3550		int size = ReplySizeInBytes
3551		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3552		  - I2O_SCSI_SENSE_DATA_SZ;
3553
3554		if (size > sizeof(ccb->csio.sense_data)) {
3555			size = sizeof(ccb->csio.sense_data);
3556		}
3557		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3558		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3559		    Reply_Ptr, size);
3560	}
3561
3562	/* Free up in-kernel buffers */
3563	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3564		/* Copy out as necessary */
3565		if ((error == 0)
3566		/* Treat the DIR bit as valid; an unneeded copyout is harmless */
3567		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3568		  & I2O_SGL_FLAGS_DIR) == 0)) {
3569			error = copyout((caddr_t)(elm->KernelSpace),
3570			  elm->UserSpace,
3571			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3572		}
3573		SLIST_REMOVE_HEAD(&sgList, link);
3574		free(elm, M_TEMP);
3575	}
3576	if (error == 0) {
3577	/* Copy reply frame to user space */
3578		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3579				ReplySizeInBytes);
3580	}
3581	free(Reply_Ptr, M_TEMP);
3582	asr_free_ccb(ccb);
3583
3584	return (error);
3585} /* ASR_queue_i */
3586
3587/*----------------------------------------------------------------------*/
3588/*			    Function asr_ioctl			       */
3589/*----------------------------------------------------------------------*/
3590/* The parameters passed to this function are :				*/
3591/*     dev  : Device number.						*/
3592/*     cmd  : Ioctl Command						*/
3593/*     data : User Argument Passed In.					*/
3594/*     flag : Mode Parameter						*/
3595/*     td   : Calling thread						*/
3596/*									*/
3597/* This function is the user interface into this adapter driver		*/
3598/*									*/
3599/* Return : zero if OK, error code if not				*/
3600/*----------------------------------------------------------------------*/
3601
3602static int
3603asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
3604{
3605	Asr_softc_t	*sc = dev->si_drv1;
3606	int		i, error = 0;
3607#ifdef ASR_IOCTL_COMPAT
3608	int		j;
3609#endif /* ASR_IOCTL_COMPAT */
3610
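	/* Unknown commands, and a missing softc, fall through to EINVAL */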
3611	if (sc != NULL)
3612	switch(cmd) {
3613
3614	case DPT_SIGNATURE:
3615#ifdef ASR_IOCTL_COMPAT
3616#if (dsDescription_size != 50)
3617	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3618#endif
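		/*
		 * When the size is encoded in the command, the buffer is
		 * passed inline in `data'; otherwise `data' holds a user
		 * pointer that must be copied out through.
		 */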
3619		if (cmd & 0xFFFF0000) {
3620			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3621			return (0);
3622		}
3623	/* Traditional version of the ioctl interface */
3624	case DPT_SIGNATURE & 0x0000FFFF:
3625#endif
3626		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3627				sizeof(dpt_sig_S)));
3628
3629	/* Traditional version of the ioctl interface */
3630	case DPT_CTRLINFO & 0x0000FFFF:
3631	case DPT_CTRLINFO: {
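		/* Controller description block handed back to the management ioctl */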
3632		struct {
3633			u_int16_t length;
3634			u_int16_t drvrHBAnum;
3635			u_int32_t baseAddr;
3636			u_int16_t blinkState;
3637			u_int8_t  pciBusNum;
3638			u_int8_t  pciDeviceNum;
3639			u_int16_t hbaFlags;
3640			u_int16_t Interrupt;
3641			u_int32_t reserved1;
3642			u_int32_t reserved2;
3643			u_int32_t reserved3;
3644		} CtlrInfo;
3645
3646		bzero(&CtlrInfo, sizeof(CtlrInfo));
3647		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3648		CtlrInfo.drvrHBAnum = asr_unit(dev);
3649		CtlrInfo.baseAddr = sc->ha_Base;
3650		i = ASR_getBlinkLedCode (sc);
3651		if (i == -1)
3652			i = 0;
3653
3654		CtlrInfo.blinkState = i;
3655		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3656		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3657#define	FLG_OSD_PCI_VALID 0x0001
3658#define	FLG_OSD_DMA	  0x0002
3659#define	FLG_OSD_I2O	  0x0004
3660		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3661		CtlrInfo.Interrupt = sc->ha_irq;
3662#ifdef ASR_IOCTL_COMPAT
3663		if (cmd & 0xffff0000)
3664			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3665		else
3666#endif /* ASR_IOCTL_COMPAT */
3667		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3668	}	return (error);
3669
3670	/* Traditional version of the ioctl interface */
3671	case DPT_SYSINFO & 0x0000FFFF:
3672	case DPT_SYSINFO: {
3673		sysInfo_S	Info;
3674#ifdef ASR_IOCTL_COMPAT
3675		char	      * cp;
3676		/* Kernel Specific ptok `hack' */
3677#define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3678
3679		bzero(&Info, sizeof(Info));
3680
3681		/* Appears I am the only person in the Kernel doing this */
3682		outb (0x70, 0x12);
3683		i = inb(0x71);
3684		j = i >> 4;
3685		if (j == 0x0f) {	/* 0xF nibble: type is in the extended byte */
3686			outb (0x70, 0x19);
3687			j = inb (0x71);
3688		}
3689		Info.drive0CMOS = j;
3690
3691		j = i & 0x0f;
3692		if (j == 0x0f) {	/* 0xF nibble: type is in the extended byte */
3693			outb (0x70, 0x1a);
3694			j = inb (0x71);
3695		}
3696		Info.drive1CMOS = j;
3697
3698		Info.numDrives = *((char *)ptok(0x475));
3699#else /* ASR_IOCTL_COMPAT */
3700
		/* Without the compat block above, Info has not been cleared yet */
3701		bzero(&Info, sizeof(Info));
#endif /* ASR_IOCTL_COMPAT */
3702
3703		Info.processorFamily = ASR_sig.dsProcessorFamily;
3704#if defined(__i386__)
3705		switch (cpu) {
3706		case CPU_386SX: case CPU_386:
3707			Info.processorType = PROC_386; break;
3708		case CPU_486SX: case CPU_486:
3709			Info.processorType = PROC_486; break;
3710		case CPU_586:
3711			Info.processorType = PROC_PENTIUM; break;
3712		case CPU_686:
3713			Info.processorType = PROC_SEXIUM; break;
3714		}
3715#elif defined(__alpha__)
3716		Info.processorType = PROC_ALPHA;
3717#endif
3718
3719		Info.osType = OS_BSDI_UNIX;
3720		Info.osMajorVersion = osrelease[0] - '0';
3721		Info.osMinorVersion = osrelease[2] - '0';
3722		/* Info.osRevision = 0; */
3723		/* Info.osSubRevision = 0; */
3724		Info.busType = SI_PCI_BUS;
3725		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3726
3727#ifdef ASR_IOCTL_COMPAT
3728		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3729		/* Go Out And Look For I2O SmartROM */
3730		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3731			int k;
3732
3733			cp = ptok(j);
3734			if (*((unsigned short *)cp) != 0xAA55) {
3735				continue;
3736			}
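			/* cp[2] is the ROM size in 512-byte blocks; skip past this ROM */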
3737			j += (cp[2] * 512) - 2048;
3738			if ((*((u_long *)(cp + 6))
3739			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3740			 || (*((u_long *)(cp + 10))
3741			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3742				continue;
3743			}
3744			cp += 0x24;
3745			for (k = 0; k < 64; ++k) {
3746				if (*((unsigned short *)cp)
3747				 == (' ' + ('v' * 256))) {
3748					break;
3749				}
				++cp;	/* advance one byte at a time through the header */
3750			}
3751			if (k < 64) {
3752				Info.smartROMMajorVersion
3753				    = *((unsigned char *)(cp += 4)) - '0';
3754				Info.smartROMMinorVersion
3755				    = *((unsigned char *)(cp += 2));
3756				Info.smartROMRevision
3757				    = *((unsigned char *)(++cp));
3758				Info.flags |= SI_SmartROMverValid;
3759				Info.flags &= ~SI_NO_SmartROM;
3760				break;
3761			}
3762		}
3763		/* Get The Conventional Memory Size From CMOS */
3764		outb (0x70, 0x16);
3765		j = inb (0x71);
3766		j <<= 8;
3767		outb (0x70, 0x15);
3768		j |= inb(0x71);
3769		Info.conventionalMemSize = j;
3770
3771		/* Get The Extended Memory Found At Power On From CMOS */
3772		outb (0x70, 0x31);
3773		j = inb (0x71);
3774		j <<= 8;
3775		outb (0x70, 0x30);
3776		j |= inb(0x71);
3777		Info.extendedMemSize = j;
3778		Info.flags |= SI_MemorySizeValid;
3779
3780		/* Copy Out The Info Structure To The User */
3781		if (cmd & 0xFFFF0000)
3782			bcopy(&Info, data, sizeof(Info));
3783		else
3784#endif /* ASR_IOCTL_COMPAT */
3785		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3786		return (error); }
3787
3788		/* Get The BlinkLED State */
3789	case DPT_BLINKLED:
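		/* A return of -1 means no blink code is available; report 0 */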
3790		i = ASR_getBlinkLedCode (sc);
3791		if (i == -1)
3792			i = 0;
3793#ifdef ASR_IOCTL_COMPAT
3794		if (cmd & 0xffff0000)
3795			bcopy(&i, data, sizeof(i));
3796		else
3797#endif /* ASR_IOCTL_COMPAT */
3798		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3799		break;
3800
3801		/* Send an I2O command */
3802	case I2OUSRCMD:
3803		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3804
3805		/* Reset and re-initialize the adapter */
3806	case I2ORESETCMD:
3807		return (ASR_reset(sc));
3808
3809		/* Rescan the LCT table and resynchronize the information */
3810	case I2ORESCANCMD:
3811		return (ASR_rescan(sc));
3812	}
3813	return (EINVAL);
3814} /* asr_ioctl */
3815