/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVERNAME			"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/ioccom.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <sys/bus_dma.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/aac/aacreg.h>
#include <sys/aac_ioctl.h>
#include <dev/aac/aacvar.h>
#include <dev/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
				  struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_daemon(void *arg);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(struct aac_softc *sc);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
				   int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
				       int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
			       int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
				 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
				 u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
				struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
				u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
				     struct aac_fib *fib);

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

const struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int aac_rx_get_outb_queue(struct aac_softc *sc);
static void aac_rx_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
				    u_int32_t arg0, u_int32_t arg1,
				    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int aac_rkt_get_outb_queue(struct aac_softc *sc);
static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

const struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void		aac_describe_controller(struct aac_softc *sc);
static const char	*aac_describe_code(const struct aac_code_lookup *table,
				   u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_ioctl_t	aac_ioctl;
static d_poll_t		aac_poll;
static void		aac_cdevpriv_dtor(void *arg);
static int		aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int		aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void		aac_handle_aif(struct aac_softc *sc,
					   struct aac_fib *fib);
static int		aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int		aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_return_aif(struct aac_softc *sc,
					struct aac_fib_context *ctx, caddr_t uptr);
static int		aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int		aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int		aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void		aac_ioctl_event(struct aac_softc *sc,
					struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct cdevsw aac_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	aac_open,
	.d_ioctl =	aac_ioctl,
	.d_poll =	aac_poll,
	.d_name =	"aac",
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF);
	mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kproc_create((void(*)(void *))aac_command_thread, sc,
		   &sc->aifthread, 0, 0, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

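	/*
	 * Kick off the clock-sync daemon: the first host-time update fires
	 * one minute after attach (60 * hz); aac_daemon() then reschedules
	 * itself every 30 minutes (30 * 60 * hz).
	 */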
	mtx_lock(&sc->aac_io_lock);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0)
		return;
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add_event: unknown event %d\n",
		    event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)malloc(sizeof *co, M_AACBUF,
		       M_NOWAIT | M_ZERO);
		if (co == NULL)
			panic("Out of memory?!");
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x  name '%.16s'  size %u  type %d",
		      mir->MntTable[0].ObjectId,
		      mir->MntTable[0].FileSystemName,
		      mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
				mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		mtx_lock(&sc->aac_container_lock);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		mtx_unlock(&sc->aac_container_lock);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       MAXBSIZE,		/* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       MAXBSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       busdma_lock_mutex,	/* lockfunc */
			       &sc->aac_io_lock,	/* lockfuncarg */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0, 			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,  /* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR, 	/* highaddr */
			       NULL, NULL, 		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       NULL, NULL,		/* No locking needed */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200: they cannot DMA command
	 * structures that lie below physical address 8192.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of being ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, 8192 + sizeof(*sc->aac_common),
			aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
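	/*
	 * Illustrative values only: if the allocation landed at bus address
	 * 0x1000, both the kernel pointer and the bus address are advanced
	 * to 0x3000 and the first 8192 bytes are simply left unused; the
	 * extra 8192 bytes requested in the DMA tag above guarantee the
	 * structure still fits after the shift.
	 */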
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
				  M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->aac_irq), sc->aac_irq);
		pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	callout_drain(&sc->aac_daemontime);

	mtx_lock(&sc->aac_io_lock);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
	}
	mtx_unlock(&sc->aac_io_lock);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		free(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		printf("FAILED.\n");
	else
		printf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			printf("FAILED.\n");
		} else {
			printf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_lock(&sc->aac_io_lock);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
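		/*
		 * The handle read from the outbound queue encodes more than
		 * an address: bit 1 set means the entry is an AIF, bit 0 set
		 * means a "fast" response carrying no status payload, and the
		 * remaining bits (index >> 2) are the index into
		 * sc->aac_commands.  aac_map_command_sg() builds the
		 * SenderFibAddress with the same layout.
		 */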
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF,
				   M_NOWAIT | M_ZERO);
			if (fib == NULL) {
				/* If we're really this short on memory,
				 * hopefully breaking out of the handler will
				 * allow something to get freed.  This
				 * actually sucks a whole lot.
				 */
				break;
			}
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			free(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
int
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue_fast(taskqueue_fast, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
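		/*
		 * 32 is an ASCII space: replacing a leading NUL ensures the
		 * command thread's ac_printf[0] != 0 check still notices the
		 * message and hands it to aac_print_printf().
		 */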
		if ((reason & AAC_DB_PRINTF) &&
			(sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * msleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
	return (FILTER_HANDLED);
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen != 0) {
			if (cm->cm_flags & AAC_REQ_BIO)
				error = bus_dmamap_load_bio(
				    sc->aac_buffer_dmat, cm->cm_datamap,
				    (struct bio *)cm->cm_private,
				    aac_map_command_sg, cm, 0);
			else
				error = bus_dmamap_load(sc->aac_buffer_dmat,
				    cm->cm_datamap, cm->cm_data,
				    cm->cm_datalen, aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(struct aac_softc *sc)
{
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
					"aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			mtx_unlock(&sc->aac_io_lock);
			aac_alloc_commands(sc);
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/* Also check to see if the adapter has a command for us. */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					   &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						 AAC_ADAP_NORM_RESP_QUEUE,
						 fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	mtx_unlock(&sc->aac_io_lock);
	wakeup(sc->aac_dev);

	kproc_exit(0);
}

/*
 * Process completed commands.
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	mtx_lock(&sc->aac_io_lock);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
							&fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_uptime-cm->cm_timestamp));

		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	mtx_unlock(&sc->aac_io_lock);
}

/*
 * Handle a bio submitted from a disk device.
 */
void
aac_submit_bio(struct bio *bp)
{
	struct aac_disk *ad;
	struct aac_softc *sc;

	ad = (struct aac_disk *)bp->bio_disk->d_drv1;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bp);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bp = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bp = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	cm->cm_datalen = bp->bio_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_flags = AAC_REQ_BIO;
	cm->cm_private = bp;
	cm->cm_timestamp = time_uptime;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
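	/*
	 * Header.Size starts at the bare header; the command-specific
	 * structure is added below, and the scatter/gather entries are added
	 * later, in aac_map_command_sg(), once busdma has produced the
	 * segment list.
	 */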
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bp->bio_disk->d_drv1;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = (u_int64_t)bp->bio_pblkno;
		raw->ByteCount = bp->bio_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->bio_cmd == BIO_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bp->bio_pblkno;
			br->ByteCount = bp->bio_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bp->bio_pblkno;
			bw->ByteCount = bp->bio_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->bio_cmd == BIO_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bp->bio_pblkno;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bp->bio_pblkno;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bp != NULL)
		aac_enqueue_bio(sc, bp);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bp;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bp = (struct bio *)cm->cm_private;
	if (bp->bio_cmd == BIO_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->bio_resid = 0;
	} else {
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
	}
	aac_biodone(bp);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			mtx_lock(&sc->aac_io_lock);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			mtx_unlock(&sc->aac_io_lock);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_datalen = 0;
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t	*fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fm == NULL)
		return (ENOMEM);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		free(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		mtx_lock(&sc->aac_io_lock);
		aac_release_command(cm);
		sc->total_fibs++;
		mtx_unlock(&sc->aac_io_lock);
	}

	if (i > 0) {
		mtx_lock(&sc->aac_io_lock);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		mtx_unlock(&sc->aac_io_lock);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	free(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		free(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
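	/*
	 * For example (illustrative values): command index 5 is sent as
	 * SenderFibAddress 0x14; aac_new_intr() recovers the index with
	 * (index >> 2) and reads the fast-response flag from bit 0.
	 */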
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save the command index for speedy reverse-lookup */
	cm->cm_fib->Header.SenderData = cm->cm_index;

	if (cm->cm_flags & AAC_CMD_DATAIN)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREREAD);
	if (cm->cm_flags & AAC_CMD_DATAOUT)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREWRITE);
	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		int count = 10000000L;
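		/* roughly a 50 second upper bound: up to 10,000,000 retries
		 * with a 5 microsecond DELAY() between attempts */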
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);			/* wait 5 usec. */
		}
	} else {
		/* Put the FIB on the outbound queue */
		if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
			aac_unmap_command(cm);
			sc->flags |= AAC_QUEUE_FRZN;
			aac_requeue_ready(cm);
		}
	}
}

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Initialize the adapter.
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int rid, status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic\n");
			return(ENXIO);
		}
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
					   "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			     "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		     && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		 && sc->aac_if->aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that supports fewer outstanding commands */
1775	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1776
1777	/* Remap mem. resource, if required */
1778	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
1779	    atu_size > rman_get_size(sc->aac_regs_res1)) {
1780		rid = rman_get_rid(sc->aac_regs_res1);
1781		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
1782		    sc->aac_regs_res1);
1783		sc->aac_regs_res1 = bus_alloc_resource(sc->aac_dev,
1784		    SYS_RES_MEMORY, &rid, 0ul, ~0ul, atu_size, RF_ACTIVE);
1785		if (sc->aac_regs_res1 == NULL) {
1786			sc->aac_regs_res1 = bus_alloc_resource_any(
1787			    sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1788			if (sc->aac_regs_res1 == NULL) {
1789				device_printf(sc->aac_dev,
1790				    "couldn't allocate register window\n");
1791				return (ENXIO);
1792			}
1793			sc->flags &= ~AAC_FLAGS_NEW_COMM;
1794		}
1795		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
1796		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);
1797
1798		if (sc->aac_hwif == AAC_HWIF_NARK) {
1799			sc->aac_regs_res0 = sc->aac_regs_res1;
1800			sc->aac_btag0 = sc->aac_btag1;
1801			sc->aac_bhandle0 = sc->aac_bhandle1;
1802		}
1803	}
1804
1805	/* Read preferred settings */
1806	sc->aac_max_fib_size = sizeof(struct aac_fib);
1807	sc->aac_max_sectors = 128;				/* 64KB */
1808	if (sc->flags & AAC_FLAGS_SG_64BIT)
1809		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1810		 - sizeof(struct aac_blockwrite64))
1811		 / sizeof(struct aac_sg_entry64);
1812	else
1813		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1814		 - sizeof(struct aac_blockwrite))
1815		 / sizeof(struct aac_sg_entry);
1816
1817	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
1818		options = AAC_GET_MAILBOX(sc, 1);
1819		sc->aac_max_fib_size = (options & 0xFFFF);
1820		sc->aac_max_sectors = (options >> 16) << 1;
1821		options = AAC_GET_MAILBOX(sc, 2);
1822		sc->aac_sg_tablesize = (options >> 16);
1823		options = AAC_GET_MAILBOX(sc, 3);
1824		sc->aac_max_fibs = (options & 0xFFFF);
1825	}
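	/*
	 * FIB memory is carved out of page-sized chunks, so clamp the FIB
	 * size to one page and note how many FIBs fit in each page.
	 */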
1826	if (sc->aac_max_fib_size > PAGE_SIZE)
1827		sc->aac_max_fib_size = PAGE_SIZE;
1828	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;
1829
1830	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1831		sc->flags |= AAC_FLAGS_RAW_IO;
1832		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1833	}
1834	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1835	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1836		sc->flags |= AAC_FLAGS_LBA_64BIT;
1837		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1838	}
1839
1840	return (0);
1841}
1842
1843static int
1844aac_init(struct aac_softc *sc)
1845{
1846	struct aac_adapter_init	*ip;
1847	u_int32_t qoffset;
1848	int error;
1849
1850	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1851
1852	/*
1853	 * Fill in the init structure.  This tells the adapter about the
1854	 * physical location of various important shared data structures.
1855	 */
1856	ip = &sc->aac_common->ac_init;
1857	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1858	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1859		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1860		sc->flags |= AAC_FLAGS_RAW_IO;
1861	}
1862	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;
1863
1864	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1865					 offsetof(struct aac_common, ac_fibs);
1866	ip->AdapterFibsVirtualAddress = 0;
1867	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1868	ip->AdapterFibAlign = sizeof(struct aac_fib);
1869
1870	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1871				  offsetof(struct aac_common, ac_printf);
1872	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1873
1874	/*
1875	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * which effectively treats this value as being in 16MB units (2^24).
	 * Round up, since the granularity is so coarse.
1879	 */
1880	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1881	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1882		ip->HostPhysMemPages =
1883		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1884	}
1885	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1886
1887	ip->InitFlags = 0;
1888	if (sc->flags & AAC_FLAGS_NEW_COMM) {
1889		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1890		device_printf(sc->aac_dev, "New comm. interface enabled\n");
1891	}
1892
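	/* Advertise our limits to the adapter; MaxIoSize is in bytes. */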
1893	ip->MaxIoCommands = sc->aac_max_fibs;
1894	ip->MaxIoSize = sc->aac_max_sectors << 9;
1895	ip->MaxFibSize = sc->aac_max_fib_size;
1896
1897	/*
1898	 * Initialize FIB queues.  Note that it appears that the layout of the
1899	 * indexes and the segmentation of the entries may be mandated by the
1900	 * adapter, which is only told about the base of the queue index fields.
1901	 *
1902	 * The initial values of the indices are assumed to inform the adapter
1903	 * of the sizes of the respective queues, and theoretically it could
1904	 * work out the entire layout of the queue structures from this.  We
1905	 * take the easy route and just lay this area out like everyone else
1906	 * does.
1907	 *
1908	 * The Linux driver uses a much more complex scheme whereby several
1909	 * header records are kept for each queue.  We use a couple of generic
1910	 * list manipulation functions which 'know' the size of each list by
1911	 * virtue of a table.
1912	 */
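	/*
	 * Carve the queue area out of the common data area, rounded up to an
	 * AAC_QUEUE_ALIGN boundary (the mask arithmetic below assumes the
	 * alignment is a power of two).
	 */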
1913	qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
1914	qoffset &= ~(AAC_QUEUE_ALIGN - 1);
1915	sc->aac_queues =
1916	    (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
1917	ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;
1918
1919	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1920		AAC_HOST_NORM_CMD_ENTRIES;
1921	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1922		AAC_HOST_NORM_CMD_ENTRIES;
1923	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1924		AAC_HOST_HIGH_CMD_ENTRIES;
1925	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1926		AAC_HOST_HIGH_CMD_ENTRIES;
1927	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1928		AAC_ADAP_NORM_CMD_ENTRIES;
1929	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1930		AAC_ADAP_NORM_CMD_ENTRIES;
1931	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
1932		AAC_ADAP_HIGH_CMD_ENTRIES;
1933	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
1934		AAC_ADAP_HIGH_CMD_ENTRIES;
1935	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1936		AAC_HOST_NORM_RESP_ENTRIES;
1937	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1938		AAC_HOST_NORM_RESP_ENTRIES;
1939	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1940		AAC_HOST_HIGH_RESP_ENTRIES;
1941	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1942		AAC_HOST_HIGH_RESP_ENTRIES;
1943	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1944		AAC_ADAP_NORM_RESP_ENTRIES;
1945	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1946		AAC_ADAP_NORM_RESP_ENTRIES;
1947	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
1948		AAC_ADAP_HIGH_RESP_ENTRIES;
1949	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
1950		AAC_ADAP_HIGH_RESP_ENTRIES;
1951	sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
1952		&sc->aac_queues->qt_HostNormCmdQueue[0];
1953	sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
1954		&sc->aac_queues->qt_HostHighCmdQueue[0];
1955	sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
1956		&sc->aac_queues->qt_AdapNormCmdQueue[0];
1957	sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
1958		&sc->aac_queues->qt_AdapHighCmdQueue[0];
1959	sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
1960		&sc->aac_queues->qt_HostNormRespQueue[0];
1961	sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
1962		&sc->aac_queues->qt_HostHighRespQueue[0];
1963	sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
1964		&sc->aac_queues->qt_AdapNormRespQueue[0];
1965	sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
1966		&sc->aac_queues->qt_AdapHighRespQueue[0];
1967
1968	/*
1969	 * Do controller-type-specific initialisation
1970	 */
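	/* Writing ~0 to the outbound doorbell clears any stale interrupts. */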
1971	switch (sc->aac_hwif) {
1972	case AAC_HWIF_I960RX:
1973		AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
1974		break;
1975	case AAC_HWIF_RKT:
1976		AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
1977		break;
1978	default:
1979		break;
1980	}
1981
1982	/*
1983	 * Give the init structure to the controller.
1984	 */
1985	if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
1986			     sc->aac_common_busaddr +
1987			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1988			     NULL)) {
1989		device_printf(sc->aac_dev,
1990			      "error establishing init structure\n");
1991		error = EIO;
1992		goto out;
1993	}
1994
1995	error = 0;
1996out:
1997	return(error);
1998}
1999
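/*
 * Hook up the interrupt handler: a regular ithread handler for the new
 * comm. interface, or a low-level filter for the legacy queue interface.
 */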
2000static int
2001aac_setup_intr(struct aac_softc *sc)
2002{
2003
2004	if (sc->flags & AAC_FLAGS_NEW_COMM) {
2005		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2006				   INTR_MPSAFE|INTR_TYPE_BIO, NULL,
2007				   aac_new_intr, sc, &sc->aac_intr)) {
2008			device_printf(sc->aac_dev, "can't set up interrupt\n");
2009			return (EINVAL);
2010		}
2011	} else {
2012		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
2013				   INTR_TYPE_BIO, aac_filter, NULL,
2014				   sc, &sc->aac_intr)) {
2015			device_printf(sc->aac_dev,
2016				      "can't set up interrupt filter\n");
2017			return (EINVAL);
2018		}
2019	}
2020	return (0);
2021}
2022
2023/*
2024 * Send a synchronous command to the controller and wait for a result.
2025 * Indicate if the controller completed the command with an error status.
2026 */
2027static int
2028aac_sync_command(struct aac_softc *sc, u_int32_t command,
2029		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2030		 u_int32_t *sp)
2031{
2032	time_t then;
2033	u_int32_t status;
2034
2035	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2036
2037	/* populate the mailbox */
2038	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2039
2040	/* ensure the sync command doorbell flag is cleared */
2041	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2042
2043	/* then set it to signal the adapter */
2044	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2045
2046	/* spin waiting for the command to complete */
2047	then = time_uptime;
2048	do {
2049		if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
2050			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2051			return(EIO);
2052		}
2053	} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2054
2055	/* clear the completion flag */
2056	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2057
2058	/* get the command status */
2059	status = AAC_GET_MAILBOX(sc, 0);
2060	if (sp != NULL)
2061		*sp = status;
2062
2063	if (status != AAC_SRB_STS_SUCCESS)
2064		return (-1);
2065	return(0);
2066}
2067
2068int
2069aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2070		 struct aac_fib *fib, u_int16_t datasize)
2071{
2072	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2073	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2074
2075	if (datasize > AAC_FIB_DATASIZE)
2076		return(EINVAL);
2077
2078	/*
2079	 * Set up the sync FIB
2080	 */
2081	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2082				AAC_FIBSTATE_INITIALISED |
2083				AAC_FIBSTATE_EMPTY;
2084	fib->Header.XferState |= xferstate;
2085	fib->Header.Command = command;
2086	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2087	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2088	fib->Header.SenderSize = sizeof(struct aac_fib);
2089	fib->Header.SenderFibAddress = 0;	/* Not needed */
2090	fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
2091					 offsetof(struct aac_common,
2092						  ac_sync_fib);
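	/*
	 * The sync FIB lives in the shared DMA area, so the bus address set
	 * above is all the controller needs to read the request and write
	 * its response back in place.
	 */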
2093
2094	/*
2095	 * Give the FIB to the controller, wait for a response.
2096	 */
2097	if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
2098			     fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
2099		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2100		return(EIO);
2101	}
2102
2103	return (0);
2104}
2105
2106/*
2107 * Adapter-space FIB queue manipulation
2108 *
 * Note that the queue implementation here is a little funky; neither the PI
 * nor the CI will ever be zero.  This behaviour is a controller feature.
2111 */
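/*
 * Per-queue parameters, indexed by queue number: the number of entries in
 * the queue and the doorbell bit (if any) used to notify the other end of
 * activity on it.
 */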
2112static const struct {
2113	int		size;
2114	int		notify;
2115} aac_qinfo[] = {
2116	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
2117	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
2118	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
2119	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
2120	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
2121	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
2122	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
2123	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
2124};
2125
2126/*
 * Atomically insert an entry into the nominated queue; returns 0 on success or
2128 * EBUSY if the queue is full.
2129 *
2130 * Note: it would be more efficient to defer notifying the controller in
2131 *	 the case where we may be inserting several entries in rapid succession,
2132 *	 but implementing this usefully may be difficult (it would involve a
2133 *	 separate queue/notify interface).
2134 */
2135static int
2136aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
2137{
2138	u_int32_t pi, ci;
2139	int error;
2140	u_int32_t fib_size;
2141	u_int32_t fib_addr;
2142
2143	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2144
2145	fib_size = cm->cm_fib->Header.Size;
2146	fib_addr = cm->cm_fib->Header.ReceiverFibAddress;
2147
2148	/* get the producer/consumer indices */
2149	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2150	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2151
2152	/* wrap the queue? */
2153	if (pi >= aac_qinfo[queue].size)
2154		pi = 0;
2155
2156	/* check for queue full */
2157	if ((pi + 1) == ci) {
2158		error = EBUSY;
2159		goto out;
2160	}
2161
2162	/*
2163	 * To avoid a race with its completion interrupt, place this command on
2164	 * the busy queue prior to advertising it to the controller.
2165	 */
2166	aac_enqueue_busy(cm);
2167
2168	/* populate queue entry */
2169	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2170	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2171
2172	/* update producer index */
2173	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2174
2175	/* notify the adapter if we know how */
2176	if (aac_qinfo[queue].notify != 0)
2177		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2178
2179	error = 0;
2180
2181out:
2182	return(error);
2183}
2184
2185/*
 * Atomically remove one entry from the nominated queue; returns 0 on
2187 * success or ENOENT if the queue is empty.
2188 */
2189static int
2190aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
2191		struct aac_fib **fib_addr)
2192{
2193	u_int32_t pi, ci;
2194	u_int32_t fib_index;
2195	int error;
2196	int notify;
2197
2198	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2199
2200	/* get the producer/consumer indices */
2201	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2202	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2203
2204	/* check for queue empty */
2205	if (ci == pi) {
2206		error = ENOENT;
2207		goto out;
2208	}
2209
2210	/* wrap the pi so the following test works */
2211	if (pi >= aac_qinfo[queue].size)
2212		pi = 0;
2213
2214	notify = 0;
2215	if (ci == pi + 1)
2216		notify++;
2217
2218	/* wrap the queue? */
2219	if (ci >= aac_qinfo[queue].size)
2220		ci = 0;
2221
2222	/* fetch the entry */
2223	*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
2224
2225	switch (queue) {
2226	case AAC_HOST_NORM_CMD_QUEUE:
2227	case AAC_HOST_HIGH_CMD_QUEUE:
2228		/*
2229		 * The aq_fib_addr is only 32 bits wide so it can't be counted
		 * on to hold an address.  For AIFs, the adapter assumes
2231		 * that it's giving us an address into the array of AIF fibs.
2232		 * Therefore, we have to convert it to an index.
2233		 */
2234		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
2235			sizeof(struct aac_fib);
2236		*fib_addr = &sc->aac_common->ac_fibs[fib_index];
2237		break;
2238
2239	case AAC_HOST_NORM_RESP_QUEUE:
2240	case AAC_HOST_HIGH_RESP_QUEUE:
2241	{
2242		struct aac_command *cm;
2243
2244		/*
2245		 * As above, an index is used instead of an actual address.
		 * The index must be shifted to account for the fast response
2247		 * bit.  No other correction is needed since this value was
2248		 * originally provided by the driver via the SenderFibAddress
2249		 * field.
2250		 */
2251		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
2252		cm = sc->aac_commands + (fib_index >> 2);
2253		*fib_addr = cm->cm_fib;
2254
2255		/*
2256		 * Is this a fast response? If it is, update the fib fields in
2257		 * local memory since the whole fib isn't DMA'd back up.
2258		 */
2259		if (fib_index & 0x01) {
2260			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
2261			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
2262		}
2263		break;
2264	}
2265	default:
2266		panic("Invalid queue in aac_dequeue_fib()");
2267		break;
2268	}
2269
2270	/* update consumer index */
2271	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
2272
2273	/* if we have made the queue un-full, notify the adapter */
2274	if (notify && (aac_qinfo[queue].notify != 0))
2275		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2276	error = 0;
2277
2278out:
2279	return(error);
2280}
2281
2282/*
 * Put our response to an Adapter Initiated FIB on the response queue
2284 */
2285static int
2286aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
2287{
2288	u_int32_t pi, ci;
2289	int error;
2290	u_int32_t fib_size;
2291	u_int32_t fib_addr;
2292
2293	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2294
2295	/* Tell the adapter where the FIB is */
2296	fib_size = fib->Header.Size;
2297	fib_addr = fib->Header.SenderFibAddress;
2298	fib->Header.ReceiverFibAddress = fib_addr;
2299
2300	/* get the producer/consumer indices */
2301	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
2302	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
2303
2304	/* wrap the queue? */
2305	if (pi >= aac_qinfo[queue].size)
2306		pi = 0;
2307
2308	/* check for queue full */
2309	if ((pi + 1) == ci) {
2310		error = EBUSY;
2311		goto out;
2312	}
2313
2314	/* populate queue entry */
2315	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
2316	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
2317
2318	/* update producer index */
2319	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
2320
2321	/* notify the adapter if we know how */
2322	if (aac_qinfo[queue].notify != 0)
2323		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
2324
2325	error = 0;
2326
2327out:
2328	return(error);
2329}
2330
2331/*
2332 * Check for commands that have been outstanding for a suspiciously long time,
2333 * and complain about them.
2334 */
2335static void
2336aac_timeout(struct aac_softc *sc)
2337{
2338	struct aac_command *cm;
2339	time_t deadline;
2340	int timedout, code;
2341
2342	/*
	 * Traverse the busy command list and complain about late commands,
	 * but only once per command.
2345	 */
2346	timedout = 0;
2347	deadline = time_uptime - AAC_CMD_TIMEOUT;
2348	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2349		if ((cm->cm_timestamp  < deadline)
2350		    && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
2351			cm->cm_flags |= AAC_CMD_TIMEDOUT;
2352			device_printf(sc->aac_dev,
2353			    "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
2354			    cm, cm->cm_fib->Header.Command,
2355			    (int)(time_uptime-cm->cm_timestamp));
2356			AAC_PRINT_FIB(sc, cm->cm_fib);
2357			timedout++;
2358		}
2359	}
2360
2361	if (timedout) {
2362		code = AAC_GET_FWSTATUS(sc);
2363		if (code != AAC_UP_AND_RUNNING) {
2364			device_printf(sc->aac_dev, "WARNING! Controller is no "
2365				      "longer running! code= 0x%x\n", code);
2366		}
2367	}
2368}
2369
2370/*
2371 * Interface Function Vectors
2372 */
2373
2374/*
2375 * Read the current firmware status word.
2376 */
2377static int
2378aac_sa_get_fwstatus(struct aac_softc *sc)
2379{
2380	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2381
2382	return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
2383}
2384
2385static int
2386aac_rx_get_fwstatus(struct aac_softc *sc)
2387{
2388	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2389
2390	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2391	    AAC_RX_OMR0 : AAC_RX_FWSTATUS));
2392}
2393
2394static int
2395aac_rkt_get_fwstatus(struct aac_softc *sc)
2396{
2397	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2398
2399	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
2400	    AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
2401}
2402
2403/*
2404 * Notify the controller of a change in a given queue
2405 */
2406
2407static void
2408aac_sa_qnotify(struct aac_softc *sc, int qbit)
2409{
2410	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2411
2412	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
2413}
2414
2415static void
2416aac_rx_qnotify(struct aac_softc *sc, int qbit)
2417{
2418	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2419
2420	AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
2421}
2422
2423static void
2424aac_rkt_qnotify(struct aac_softc *sc, int qbit)
2425{
2426	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2427
2428	AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
2429}
2430
2431/*
2432 * Get the interrupt reason bits
2433 */
2434static int
2435aac_sa_get_istatus(struct aac_softc *sc)
2436{
2437	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2438
2439	return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
2440}
2441
2442static int
2443aac_rx_get_istatus(struct aac_softc *sc)
2444{
2445	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2446
2447	return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
2448}
2449
2450static int
2451aac_rkt_get_istatus(struct aac_softc *sc)
2452{
2453	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2454
2455	return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
2456}
2457
2458/*
2459 * Clear some interrupt reason bits
2460 */
2461static void
2462aac_sa_clear_istatus(struct aac_softc *sc, int mask)
2463{
2464	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2465
2466	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
2467}
2468
2469static void
2470aac_rx_clear_istatus(struct aac_softc *sc, int mask)
2471{
2472	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2473
2474	AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
2475}
2476
2477static void
2478aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
2479{
2480	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2481
2482	AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
2483}
2484
2485/*
2486 * Populate the mailbox and set the command word
2487 */
2488static void
2489aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
2490		u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2491{
2492	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2493
2494	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
2495	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
2496	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
2497	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
2498	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
2499}
2500
2501static void
2502aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
2503		u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2504{
2505	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2506
2507	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
2508	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
2509	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
2510	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
2511	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
2512}
2513
2514static void
2515aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2516		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2517{
2518	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2519
2520	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
2521	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
2522	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
2523	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
2524	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
2525}
2526
2527/*
2528 * Fetch the immediate command status word
2529 */
2530static int
2531aac_sa_get_mailbox(struct aac_softc *sc, int mb)
2532{
2533	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2534
2535	return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
2536}
2537
2538static int
2539aac_rx_get_mailbox(struct aac_softc *sc, int mb)
2540{
2541	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2542
2543	return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
2544}
2545
2546static int
2547aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
2548{
2549	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2550
2551	return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
2552}
2553
2554/*
2555 * Set/clear interrupt masks
2556 */
2557static void
2558aac_sa_set_interrupts(struct aac_softc *sc, int enable)
2559{
2560	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2561
2562	if (enable) {
2563		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
2564	} else {
2565		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
2566	}
2567}
2568
2569static void
2570aac_rx_set_interrupts(struct aac_softc *sc, int enable)
2571{
2572	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2573
2574	if (enable) {
2575		if (sc->flags & AAC_FLAGS_NEW_COMM)
2576			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
2577		else
2578			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
2579	} else {
2580		AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
2581	}
2582}
2583
2584static void
2585aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
2586{
2587	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");
2588
2589	if (enable) {
2590		if (sc->flags & AAC_FLAGS_NEW_COMM)
2591			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
2592		else
2593			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
2594	} else {
2595		AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
2596	}
2597}
2598
2599/*
2600 * New comm. interface: Send command functions
2601 */
2602static int
2603aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
2604{
2605	u_int32_t index, device;
2606
2607	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2608
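	/*
	 * Read a free slot offset from the inbound queue register, write the
	 * 64-bit FIB bus address and the FIB size into that slot, then write
	 * the offset back to post the command.  0xffffffff means no slot is
	 * free; the register is re-read once before giving up.
	 */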
2609	index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2610	if (index == 0xffffffffL)
2611		index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
2612	if (index == 0xffffffffL)
2613		return index;
2614	aac_enqueue_busy(cm);
2615	device = index;
2616	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2617	device += 4;
2618	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2619	device += 4;
2620	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2621	AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
2622	return 0;
2623}
2624
2625static int
2626aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
2627{
2628	u_int32_t index, device;
2629
2630	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");
2631
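	/* Same inbound queue protocol as the Rx variant, on the Rkt registers. */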
2632	index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2633	if (index == 0xffffffffL)
2634		index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
2635	if (index == 0xffffffffL)
2636		return index;
2637	aac_enqueue_busy(cm);
2638	device = index;
2639	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
2640	device += 4;
2641	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
2642	device += 4;
2643	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
2644	AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
2645	return 0;
2646}
2647
2648/*
2649 * New comm. interface: get, set outbound queue index
2650 */
2651static int
2652aac_rx_get_outb_queue(struct aac_softc *sc)
2653{
2654	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2655
2656	return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
2657}
2658
2659static int
2660aac_rkt_get_outb_queue(struct aac_softc *sc)
2661{
2662	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2663
2664	return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
2665}
2666
2667static void
2668aac_rx_set_outb_queue(struct aac_softc *sc, int index)
2669{
2670	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2671
2672	AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
2673}
2674
2675static void
2676aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
2677{
2678	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2679
2680	AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
2681}
2682
2683/*
2684 * Debugging and Diagnostics
2685 */
2686
2687/*
2688 * Print some information about the controller.
2689 */
2690static void
2691aac_describe_controller(struct aac_softc *sc)
2692{
2693	struct aac_fib *fib;
2694	struct aac_adapter_info	*info;
2695	char *adapter_type = "Adaptec RAID controller";
2696
2697	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2698
2699	mtx_lock(&sc->aac_io_lock);
2700	aac_alloc_sync_fib(sc, &fib);
2701
2702	fib->data[0] = 0;
2703	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2704		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2705		aac_release_sync_fib(sc);
2706		mtx_unlock(&sc->aac_io_lock);
2707		return;
2708	}
2709
2710	/* save the kernel revision structure for later use */
2711	info = (struct aac_adapter_info *)&fib->data[0];
2712	sc->aac_revision = info->KernelRevision;
2713
2714	if (bootverbose) {
2715		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2716		    "(%dMB cache, %dMB execution), %s\n",
2717		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2718		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2719		    info->BufferMem / (1024 * 1024),
2720		    info->ExecutionMem / (1024 * 1024),
2721		    aac_describe_code(aac_battery_platform,
2722		    info->batteryPlatform));
2723
2724		device_printf(sc->aac_dev,
2725		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2726		    info->KernelRevision.external.comp.major,
2727		    info->KernelRevision.external.comp.minor,
2728		    info->KernelRevision.external.comp.dash,
2729		    info->KernelRevision.buildNumber,
2730		    (u_int32_t)(info->SerialNumber & 0xffffff));
2731
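		/*
		 * %b prints the value (in the base selected by the leading
		 * \20, i.e. hex) followed by the names of any set bits; each
		 * name is preceded by its 1-based bit number in octal, so a
		 * value of 0x5 would print roughly as 5<SNAPSHOT,WCACHE>.
		 */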
2732		device_printf(sc->aac_dev, "Supported Options=%b\n",
2733			      sc->supported_options,
2734			      "\20"
2735			      "\1SNAPSHOT"
2736			      "\2CLUSTERS"
2737			      "\3WCACHE"
2738			      "\4DATA64"
2739			      "\5HOSTTIME"
2740			      "\6RAID50"
2741			      "\7WINDOW4GB"
2742			      "\10SCSIUPGD"
2743			      "\11SOFTERR"
2744			      "\12NORECOND"
2745			      "\13SGMAP64"
2746			      "\14ALARM"
2747			      "\15NONDASD"
2748			      "\16SCSIMGT"
2749			      "\17RAIDSCSI"
2750			      "\21ADPTINFO"
2751			      "\22NEWCOMM"
2752			      "\23ARRAY64BIT"
2753			      "\24HEATSENSOR");
2754	}
2755
2756	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2757		fib->data[0] = 0;
2758		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2759			device_printf(sc->aac_dev,
2760			    "RequestSupplementAdapterInfo failed\n");
2761		else
2762			adapter_type = ((struct aac_supplement_adapter_info *)
2763			    &fib->data[0])->AdapterTypeText;
2764	}
2765	device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
2766		adapter_type,
2767		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2768		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2769
2770	aac_release_sync_fib(sc);
2771	mtx_unlock(&sc->aac_io_lock);
2772}
2773
2774/*
 * Look up a text description of a numeric error code and return a pointer
 * to it.
2777 */
2778static const char *
2779aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
2780{
2781	int i;
2782
2783	for (i = 0; table[i].string != NULL; i++)
2784		if (table[i].code == code)
2785			return(table[i].string);
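	/* the lookup tables keep a catch-all entry just past the NULL sentinel */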
2786	return(table[i + 1].string);
2787}
2788
2789/*
2790 * Management Interface
2791 */
2792
2793static int
2794aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2795{
2796	struct aac_softc *sc;
2797
2798	sc = dev->si_drv1;
2799	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2800	device_busy(sc->aac_dev);
2801	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2802
2803	return 0;
2804}
2805
2806static int
2807aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2808{
2809	union aac_statrequest *as;
2810	struct aac_softc *sc;
2811	int error = 0;
2812
2813	as = (union aac_statrequest *)arg;
2814	sc = dev->si_drv1;
2815	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2816
2817	switch (cmd) {
2818	case AACIO_STATS:
2819		switch (as->as_item) {
2820		case AACQ_FREE:
2821		case AACQ_BIO:
2822		case AACQ_READY:
2823		case AACQ_BUSY:
2824			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2825			      sizeof(struct aac_qstat));
2826			break;
2827		default:
2828			error = ENOENT;
2829			break;
2830		}
2831	break;
2832
2833	case FSACTL_SENDFIB:
2834	case FSACTL_SEND_LARGE_FIB:
2835		arg = *(caddr_t*)arg;
2836	case FSACTL_LNX_SENDFIB:
2837	case FSACTL_LNX_SEND_LARGE_FIB:
2838		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2839		error = aac_ioctl_sendfib(sc, arg);
2840		break;
2841	case FSACTL_SEND_RAW_SRB:
2842		arg = *(caddr_t*)arg;
2843	case FSACTL_LNX_SEND_RAW_SRB:
2844		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2845		error = aac_ioctl_send_raw_srb(sc, arg);
2846		break;
2847	case FSACTL_AIF_THREAD:
2848	case FSACTL_LNX_AIF_THREAD:
2849		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2850		error = EINVAL;
2851		break;
2852	case FSACTL_OPEN_GET_ADAPTER_FIB:
2853		arg = *(caddr_t*)arg;
2854	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2855		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2856		error = aac_open_aif(sc, arg);
2857		break;
2858	case FSACTL_GET_NEXT_ADAPTER_FIB:
2859		arg = *(caddr_t*)arg;
2860	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2861		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2862		error = aac_getnext_aif(sc, arg);
2863		break;
2864	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2865		arg = *(caddr_t*)arg;
2866	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2867		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2868		error = aac_close_aif(sc, arg);
2869		break;
2870	case FSACTL_MINIPORT_REV_CHECK:
2871		arg = *(caddr_t*)arg;
2872	case FSACTL_LNX_MINIPORT_REV_CHECK:
2873		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2874		error = aac_rev_check(sc, arg);
2875		break;
2876	case FSACTL_QUERY_DISK:
2877		arg = *(caddr_t*)arg;
2878	case FSACTL_LNX_QUERY_DISK:
2879		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2880		error = aac_query_disk(sc, arg);
2881		break;
2882	case FSACTL_DELETE_DISK:
2883	case FSACTL_LNX_DELETE_DISK:
2884		/*
		 * We don't trust userland to tell us when to delete a
		 * container; instead we rely on an AIF coming from the
		 * controller.
2888		 */
2889		error = 0;
2890		break;
2891	case FSACTL_GET_PCI_INFO:
2892		arg = *(caddr_t*)arg;
2893	case FSACTL_LNX_GET_PCI_INFO:
2894		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2895		error = aac_get_pci_info(sc, arg);
2896		break;
2897	case FSACTL_GET_FEATURES:
2898		arg = *(caddr_t*)arg;
2899	case FSACTL_LNX_GET_FEATURES:
2900		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2901		error = aac_supported_features(sc, arg);
2902		break;
2903	default:
2904		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2905		error = EINVAL;
2906		break;
2907	}
2908	return(error);
2909}
2910
2911static int
2912aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2913{
2914	struct aac_softc *sc;
2915	struct aac_fib_context *ctx;
2916	int revents;
2917
2918	sc = dev->si_drv1;
2919	revents = 0;
2920
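	/* Data is readable as long as any open AIF context lags the writer. */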
2921	mtx_lock(&sc->aac_aifq_lock);
2922	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2923		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2924			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2925				revents |= poll_events & (POLLIN | POLLRDNORM);
2926				break;
2927			}
2928		}
2929	}
2930	mtx_unlock(&sc->aac_aifq_lock);
2931
2932	if (revents == 0) {
2933		if (poll_events & (POLLIN | POLLRDNORM))
2934			selrecord(td, &sc->rcv_select);
2935	}
2936
2937	return (revents);
2938}
2939
2940static void
2941aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2942{
2943
2944	switch (event->ev_type) {
2945	case AAC_EVENT_CMFREE:
2946		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2947		if (aac_alloc_command(sc, (struct aac_command **)arg)) {
2948			aac_add_event(sc, event);
2949			return;
2950		}
2951		free(event, M_AACBUF);
2952		wakeup(arg);
2953		break;
2954	default:
2955		break;
2956	}
2957}
2958
2959/*
2960 * Send a FIB supplied from userspace
2961 */
2962static int
2963aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2964{
2965	struct aac_command *cm;
2966	int size, error;
2967
2968	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2969
2970	cm = NULL;
2971
2972	/*
2973	 * Get a command
2974	 */
2975	mtx_lock(&sc->aac_io_lock);
2976	if (aac_alloc_command(sc, &cm)) {
2977		struct aac_event *event;
2978
2979		event = malloc(sizeof(struct aac_event), M_AACBUF,
2980		    M_NOWAIT | M_ZERO);
2981		if (event == NULL) {
2982			error = EBUSY;
2983			mtx_unlock(&sc->aac_io_lock);
2984			goto out;
2985		}
2986		event->ev_type = AAC_EVENT_CMFREE;
2987		event->ev_callback = aac_ioctl_event;
2988		event->ev_arg = &cm;
2989		aac_add_event(sc, event);
2990		msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
2991	}
2992	mtx_unlock(&sc->aac_io_lock);
2993
2994	/*
2995	 * Fetch the FIB header, then re-copy to get data as well.
2996	 */
2997	if ((error = copyin(ufib, cm->cm_fib,
2998			    sizeof(struct aac_fib_header))) != 0)
2999		goto out;
3000	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
3001	if (size > sc->aac_max_fib_size) {
3002		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
3003			      size, sc->aac_max_fib_size);
3004		size = sc->aac_max_fib_size;
3005	}
3006	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
3007		goto out;
3008	cm->cm_fib->Header.Size = size;
3009	cm->cm_timestamp = time_uptime;
3010
3011	/*
3012	 * Pass the FIB to the controller, wait for it to complete.
3013	 */
3014	mtx_lock(&sc->aac_io_lock);
3015	error = aac_wait_command(cm);
3016	mtx_unlock(&sc->aac_io_lock);
3017	if (error != 0) {
3018		device_printf(sc->aac_dev,
3019			      "aac_wait_command return %d\n", error);
3020		goto out;
3021	}
3022
3023	/*
3024	 * Copy the FIB and data back out to the caller.
3025	 */
3026	size = cm->cm_fib->Header.Size;
3027	if (size > sc->aac_max_fib_size) {
3028		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
3029			      size, sc->aac_max_fib_size);
3030		size = sc->aac_max_fib_size;
3031	}
3032	error = copyout(cm->cm_fib, ufib, size);
3033
3034out:
3035	if (cm != NULL) {
3036		mtx_lock(&sc->aac_io_lock);
3037		aac_release_command(cm);
3038		mtx_unlock(&sc->aac_io_lock);
3039	}
3040	return(error);
3041}
3042
3043/*
3044 * Send a passthrough FIB supplied from userspace
3045 */
3046static int
3047aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
3048{
3049	struct aac_command *cm;
3050	struct aac_event *event;
3051	struct aac_fib *fib;
3052	struct aac_srb *srbcmd, *user_srb;
3053	struct aac_sg_entry *sge;
3054	struct aac_sg_entry64 *sge64;
3055	void *srb_sg_address, *ureply;
3056	uint32_t fibsize, srb_sg_bytecount;
3057	int error, transfer_data;
3058
3059	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3060
3061	cm = NULL;
3062	transfer_data = 0;
3063	fibsize = 0;
3064	user_srb = (struct aac_srb *)arg;
3065
3066	mtx_lock(&sc->aac_io_lock);
3067	if (aac_alloc_command(sc, &cm)) {
3068		 event = malloc(sizeof(struct aac_event), M_AACBUF,
3069		    M_NOWAIT | M_ZERO);
3070		if (event == NULL) {
3071			error = EBUSY;
3072			mtx_unlock(&sc->aac_io_lock);
3073			goto out;
3074		}
3075		event->ev_type = AAC_EVENT_CMFREE;
3076		event->ev_callback = aac_ioctl_event;
3077		event->ev_arg = &cm;
3078		aac_add_event(sc, event);
		msleep(&cm, &sc->aac_io_lock, 0, "aacraw", 0);
3080	}
3081	mtx_unlock(&sc->aac_io_lock);
3082
3083	cm->cm_data = NULL;
3084	fib = cm->cm_fib;
3085	srbcmd = (struct aac_srb *)fib->data;
3086	error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t));
3087	if (error != 0)
3088		goto out;
3089	if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) {
3090		error = EINVAL;
3091		goto out;
3092	}
3093	error = copyin(user_srb, srbcmd, fibsize);
3094	if (error != 0)
3095		goto out;
3096	srbcmd->function = 0;
3097	srbcmd->retry_limit = 0;
3098	if (srbcmd->sg_map.SgCount > 1) {
3099		error = EINVAL;
3100		goto out;
3101	}
3102
3103	/* Retrieve correct SG entries. */
3104	if (fibsize == (sizeof(struct aac_srb) +
3105	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
3106		sge = srbcmd->sg_map.SgEntry;
3107		sge64 = NULL;
3108		srb_sg_bytecount = sge->SgByteCount;
3109		srb_sg_address = (void *)(uintptr_t)sge->SgAddress;
3110	}
3111#ifdef __amd64__
3112	else if (fibsize == (sizeof(struct aac_srb) +
3113	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
3114		sge = NULL;
3115		sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
3116		srb_sg_bytecount = sge64->SgByteCount;
3117		srb_sg_address = (void *)sge64->SgAddress;
3118		if (sge64->SgAddress > 0xffffffffull &&
3119		    (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
3120			error = EINVAL;
3121			goto out;
3122		}
3123	}
3124#endif
3125	else {
3126		error = EINVAL;
3127		goto out;
3128	}
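	/* The reply area immediately follows the SRB in the user's buffer. */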
3129	ureply = (char *)arg + fibsize;
3130	srbcmd->data_len = srb_sg_bytecount;
3131	if (srbcmd->sg_map.SgCount == 1)
3132		transfer_data = 1;
3133
3134	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3135	if (transfer_data) {
3136		cm->cm_datalen = srb_sg_bytecount;
3137		cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
3138		if (cm->cm_data == NULL) {
3139			error = ENOMEM;
3140			goto out;
3141		}
3142		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
3143			cm->cm_flags |= AAC_CMD_DATAIN;
3144		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
3145			cm->cm_flags |= AAC_CMD_DATAOUT;
3146			error = copyin(srb_sg_address, cm->cm_data,
3147			    cm->cm_datalen);
3148			if (error != 0)
3149				goto out;
3150		}
3151	}
3152
3153	fib->Header.Size = sizeof(struct aac_fib_header) +
3154	    sizeof(struct aac_srb);
3155	fib->Header.XferState =
3156	    AAC_FIBSTATE_HOSTOWNED   |
3157	    AAC_FIBSTATE_INITIALISED |
3158	    AAC_FIBSTATE_EMPTY       |
3159	    AAC_FIBSTATE_FROMHOST    |
3160	    AAC_FIBSTATE_REXPECTED   |
3161	    AAC_FIBSTATE_NORM        |
3162	    AAC_FIBSTATE_ASYNC       |
3163	    AAC_FIBSTATE_FAST_RESPONSE;
3164	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
3165	    ScsiPortCommandU64 : ScsiPortCommand;
3166
3167	mtx_lock(&sc->aac_io_lock);
3168	aac_wait_command(cm);
3169	mtx_unlock(&sc->aac_io_lock);
3170
3171	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
3172		error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
3173		if (error != 0)
3174			goto out;
3175	}
3176	error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
3177out:
3178	if (cm != NULL) {
3179		if (cm->cm_data != NULL)
3180			free(cm->cm_data, M_AACBUF);
3181		mtx_lock(&sc->aac_io_lock);
3182		aac_release_command(cm);
3183		mtx_unlock(&sc->aac_io_lock);
3184	}
3185	return(error);
3186}
3187
3188/*
3189 * cdevpriv interface private destructor.
3190 */
3191static void
3192aac_cdevpriv_dtor(void *arg)
3193{
3194	struct aac_softc *sc;
3195
3196	sc = arg;
3197	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3198	mtx_lock(&Giant);
3199	device_unbusy(sc->aac_dev);
3200	mtx_unlock(&Giant);
3201}
3202
3203/*
3204 * Handle an AIF sent to us by the controller; queue it for later reference.
3205 * If the queue fills up, then drop the older entries.
3206 */
3207static void
3208aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3209{
3210	struct aac_aif_command *aif;
3211	struct aac_container *co, *co_next;
3212	struct aac_fib_context *ctx;
3213	struct aac_mntinforesp *mir;
3214	int next, current, found;
3215	int count = 0, added = 0, i = 0;
3216	uint32_t channel;
3217
3218	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3219
3220	aif = (struct aac_aif_command*)&fib->data[0];
3221	aac_print_aif(sc, aif);
3222
3223	/* Is it an event that we should care about? */
3224	switch (aif->command) {
3225	case AifCmdEventNotify:
3226		switch (aif->data.EN.type) {
3227		case AifEnAddContainer:
3228		case AifEnDeleteContainer:
3229			/*
3230			 * A container was added or deleted, but the message
3231			 * doesn't tell us anything else!  Re-enumerate the
3232			 * containers and sort things out.
3233			 */
3234			aac_alloc_sync_fib(sc, &fib);
3235			do {
3236				/*
3237				 * Ask the controller for its containers one at
3238				 * a time.
3239				 * XXX What if the controller's list changes
				 * midway through this enumeration?
3241				 * XXX This should be done async.
3242				 */
3243				if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
3244					continue;
3245				if (i == 0)
3246					count = mir->MntRespCount;
3247				/*
3248				 * Check the container against our list.
3249				 * co->co_found was already set to 0 in a
3250				 * previous run.
3251				 */
3252				if ((mir->Status == ST_OK) &&
3253				    (mir->MntTable[0].VolType != CT_NONE)) {
3254					found = 0;
3255					TAILQ_FOREACH(co,
3256						      &sc->aac_container_tqh,
3257						      co_link) {
3258						if (co->co_mntobj.ObjectId ==
3259						    mir->MntTable[0].ObjectId) {
3260							co->co_found = 1;
3261							found = 1;
3262							break;
3263						}
3264					}
3265					/*
3266					 * If the container matched, continue
3267					 * in the list.
3268					 */
3269					if (found) {
3270						i++;
3271						continue;
3272					}
3273
3274					/*
3275					 * This is a new container.  Do all the
3276					 * appropriate things to set it up.
3277					 */
3278					aac_add_container(sc, mir, 1);
3279					added = 1;
3280				}
3281				i++;
3282			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3283			aac_release_sync_fib(sc);
3284
3285			/*
3286			 * Go through our list of containers and see which ones
3287			 * were not marked 'found'.  Since the controller didn't
3288			 * list them they must have been deleted.  Do the
3289			 * appropriate steps to destroy the device.  Also reset
3290			 * the co->co_found field.
3291			 */
3292			co = TAILQ_FIRST(&sc->aac_container_tqh);
3293			while (co != NULL) {
3294				if (co->co_found == 0) {
3295					mtx_unlock(&sc->aac_io_lock);
3296					mtx_lock(&Giant);
3297					device_delete_child(sc->aac_dev,
3298							    co->co_disk);
3299					mtx_unlock(&Giant);
3300					mtx_lock(&sc->aac_io_lock);
3301					co_next = TAILQ_NEXT(co, co_link);
3302					mtx_lock(&sc->aac_container_lock);
3303					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3304						     co_link);
3305					mtx_unlock(&sc->aac_container_lock);
3306					free(co, M_AACBUF);
3307					co = co_next;
3308				} else {
3309					co->co_found = 0;
3310					co = TAILQ_NEXT(co, co_link);
3311				}
3312			}
3313
3314			/* Attach the newly created containers */
3315			if (added) {
3316				mtx_unlock(&sc->aac_io_lock);
3317				mtx_lock(&Giant);
3318				bus_generic_attach(sc->aac_dev);
3319				mtx_unlock(&Giant);
3320				mtx_lock(&sc->aac_io_lock);
3321			}
3322
3323			break;
3324
3325		case AifEnEnclosureManagement:
3326			switch (aif->data.EN.data.EEE.eventType) {
3327			case AIF_EM_DRIVE_INSERTION:
3328			case AIF_EM_DRIVE_REMOVAL:
3329				channel = aif->data.EN.data.EEE.unitID;
3330				if (sc->cam_rescan_cb != NULL)
3331					sc->cam_rescan_cb(sc,
3332					    (channel >> 24) & 0xF,
3333					    (channel & 0xFFFF));
3334				break;
3335			}
3336			break;
3337
3338		case AifEnAddJBOD:
3339		case AifEnDeleteJBOD:
3340			channel = aif->data.EN.data.ECE.container;
3341			if (sc->cam_rescan_cb != NULL)
3342				sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
3343				    AAC_CAM_TARGET_WILDCARD);
3344			break;
3345
3346		default:
3347			break;
3348		}
3349
3350	default:
3351		break;
3352	}
3353
3354	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3355	mtx_lock(&sc->aac_aifq_lock);
3356	current = sc->aifq_idx;
3357	next = (current + 1) % AAC_AIFQ_LENGTH;
3358	if (next == 0)
3359		sc->aifq_filled = 1;
3360	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3361	/* modify AIF contexts */
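	/*
	 * A reader whose index now matches the new write position has been
	 * lapped; mark it wrapped so it still sees a full queue of AIFs.  A
	 * wrapped reader still pointing at the slot just overwritten is
	 * advanced past it.
	 */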
3362	if (sc->aifq_filled) {
3363		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3364			if (next == ctx->ctx_idx)
3365				ctx->ctx_wrap = 1;
3366			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3367				ctx->ctx_idx = next;
3368		}
3369	}
3370	sc->aifq_idx = next;
3371	/* On the off chance that someone is sleeping for an aif... */
3372	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3373		wakeup(sc->aac_aifq);
3374	/* Wakeup any poll()ers */
3375	selwakeuppri(&sc->rcv_select, PRIBIO);
3376	mtx_unlock(&sc->aac_aifq_lock);
3377}
3378
3379/*
3380 * Return the Revision of the driver to userspace and check to see if the
3381 * userspace app is possibly compatible.  This is extremely bogus since
3382 * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3383 * returning what the card reported.
3384 */
3385static int
3386aac_rev_check(struct aac_softc *sc, caddr_t udata)
3387{
3388	struct aac_rev_check rev_check;
3389	struct aac_rev_check_resp rev_check_resp;
3390	int error = 0;
3391
3392	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3393
3394	/*
3395	 * Copyin the revision struct from userspace
3396	 */
3397	if ((error = copyin(udata, (caddr_t)&rev_check,
3398			sizeof(struct aac_rev_check))) != 0) {
3399		return error;
3400	}
3401
3402	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3403	      rev_check.callingRevision.buildNumber);
3404
3405	/*
3406	 * Doctor up the response struct.
3407	 */
3408	rev_check_resp.possiblyCompatible = 1;
3409	rev_check_resp.adapterSWRevision.external.comp.major =
3410	    AAC_DRIVER_MAJOR_VERSION;
3411	rev_check_resp.adapterSWRevision.external.comp.minor =
3412	    AAC_DRIVER_MINOR_VERSION;
3413	rev_check_resp.adapterSWRevision.external.comp.type =
3414	    AAC_DRIVER_TYPE;
3415	rev_check_resp.adapterSWRevision.external.comp.dash =
3416	    AAC_DRIVER_BUGFIX_LEVEL;
3417	rev_check_resp.adapterSWRevision.buildNumber =
3418	    AAC_DRIVER_BUILD;
3419
3420	return(copyout((caddr_t)&rev_check_resp, udata,
3421			sizeof(struct aac_rev_check_resp)));
3422}
3423
3424/*
3425 * Pass the fib context to the caller
3426 */
3427static int
3428aac_open_aif(struct aac_softc *sc, caddr_t arg)
3429{
3430	struct aac_fib_context *fibctx, *ctx;
3431	int error = 0;
3432
3433	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3434
3435	fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
3436	if (fibctx == NULL)
3437		return (ENOMEM);
3438
3439	mtx_lock(&sc->aac_aifq_lock);
3440	/* all elements are already 0, add to queue */
3441	if (sc->fibctx == NULL)
3442		sc->fibctx = fibctx;
3443	else {
3444		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3445			;
3446		ctx->next = fibctx;
3447		fibctx->prev = ctx;
3448	}
3449
3450	/* evaluate unique value */
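	/*
	 * Derive an initial token from the context's address, then bump it
	 * until it collides with no other open context, restarting the scan
	 * after each bump.
	 */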
3451	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3452	ctx = sc->fibctx;
3453	while (ctx != fibctx) {
3454		if (ctx->unique == fibctx->unique) {
3455			fibctx->unique++;
3456			ctx = sc->fibctx;
3457		} else {
3458			ctx = ctx->next;
3459		}
3460	}
3461	mtx_unlock(&sc->aac_aifq_lock);
3462
3463	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3464	if (error)
3465		aac_close_aif(sc, (caddr_t)ctx);
3466	return error;
3467}
3468
3469/*
3470 * Close the caller's fib context
3471 */
3472static int
3473aac_close_aif(struct aac_softc *sc, caddr_t arg)
3474{
3475	struct aac_fib_context *ctx;
3476
3477	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3478
3479	mtx_lock(&sc->aac_aifq_lock);
3480	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3481		if (ctx->unique == *(uint32_t *)&arg) {
3482			if (ctx == sc->fibctx)
3483				sc->fibctx = NULL;
3484			else {
3485				ctx->prev->next = ctx->next;
3486				if (ctx->next)
3487					ctx->next->prev = ctx->prev;
3488			}
3489			break;
3490		}
3491	}
3492	mtx_unlock(&sc->aac_aifq_lock);
3493	if (ctx)
3494		free(ctx, M_AACBUF);
3495
3496	return 0;
3497}
3498
3499/*
3500 * Pass the caller the next AIF in their queue
3501 */
3502static int
3503aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3504{
3505	struct get_adapter_fib_ioctl agf;
3506	struct aac_fib_context *ctx;
3507	int error;
3508
3509	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3510
3511	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3512		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3513			if (agf.AdapterFibContext == ctx->unique)
3514				break;
3515		}
3516		if (!ctx)
3517			return (EFAULT);
3518
3519		error = aac_return_aif(sc, ctx, agf.AifFib);
3520		if (error == EAGAIN && agf.Wait) {
3521			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3522			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3523			while (error == EAGAIN) {
3524				error = tsleep(sc->aac_aifq, PRIBIO |
3525					       PCATCH, "aacaif", 0);
3526				if (error == 0)
3527					error = aac_return_aif(sc, ctx, agf.AifFib);
3528			}
3529			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3530		}
3531	}
3532	return(error);
3533}
3534
3535/*
3536 * Hand the next AIF off the top of the queue out to userspace.
3537 */
3538static int
3539aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3540{
3541	int current, error;
3542
3543	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3544
3545	mtx_lock(&sc->aac_aifq_lock);
3546	current = ctx->ctx_idx;
3547	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3548		/* empty */
3549		mtx_unlock(&sc->aac_aifq_lock);
3550		return (EAGAIN);
3551	}
3552	error =
3553		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3554	if (error)
3555		device_printf(sc->aac_dev,
3556		    "aac_return_aif: copyout returned %d\n", error);
3557	else {
3558		ctx->ctx_wrap = 0;
3559		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3560	}
3561	mtx_unlock(&sc->aac_aifq_lock);
3562	return(error);
3563}
3564
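/*
 * Report the PCI bus and slot the adapter lives on to the management ioctl.
 */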
3565static int
3566aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3567{
3568	struct aac_pci_info {
3569		u_int32_t bus;
3570		u_int32_t slot;
3571	} pciinf;
3572	int error;
3573
3574	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3575
3576	pciinf.bus = pci_get_bus(sc->aac_dev);
3577	pciinf.slot = pci_get_slot(sc->aac_dev);
3578
3579	error = copyout((caddr_t)&pciinf, uptr,
3580			sizeof(struct aac_pci_info));
3581
3582	return (error);
3583}
3584
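/*
 * Report optional features (currently only 64-bit LBA support) through the
 * FSACTL_GET_FEATURES management ioctl.
 */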
3585static int
3586aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3587{
3588	struct aac_features f;
3589	int error;
3590
3591	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3592
3593	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3594		return (error);
3595
3596	/*
	 * When the management driver sends the FSACTL_GET_FEATURES ioctl with
	 * featuresState all zero, the driver returns the current state of
	 * all the supported features; the data field is not valid in that
	 * case.  When the ioctl arrives with a specific bit set in
	 * featuresState, the driver returns the current state of that
	 * feature and whatever data is associated with it in the data field,
	 * or performs whatever action the data field indicates.
3606	 */
3607	if (f.feat.fValue == 0) {
3608		f.feat.fBits.largeLBA =
3609		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3610		/* TODO: In the future, add other features state here as well */
3611	} else {
3612		if (f.feat.fBits.largeLBA)
3613			f.feat.fBits.largeLBA =
3614			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3615		/* TODO: Add other features state and data in the future */
3616	}
3617
3618	error = copyout(&f, uptr, sizeof (f));
3619	return (error);
3620}
3621
3622/*
3623 * Give the userland some information about the container.  The AAC arch
3624 * expects the driver to be a SCSI passthrough type driver, so it expects
3625 * the containers to have b:t:l numbers.  Fake it.
3626 */
3627static int
3628aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3629{
3630	struct aac_query_disk query_disk;
3631	struct aac_container *co;
3632	struct aac_disk	*disk;
3633	int error, id;
3634
3635	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3636
3637	disk = NULL;
3638
3639	error = copyin(uptr, (caddr_t)&query_disk,
3640		       sizeof(struct aac_query_disk));
3641	if (error)
3642		return (error);
3643
3644	id = query_disk.ContainerNumber;
3645	if (id == -1)
3646		return (EINVAL);
3647
3648	mtx_lock(&sc->aac_container_lock);
3649	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3650		if (co->co_mntobj.ObjectId == id)
3651			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;		/* XXX is this right? */
3658	} else {
3659		disk = device_get_softc(co->co_disk);
3660		query_disk.Valid = 1;
3661		query_disk.Locked =
3662		    (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
3663		query_disk.Deleted = 0;
3664		query_disk.Bus = device_get_unit(sc->aac_dev);
3665		query_disk.Target = disk->unit;
3666		query_disk.Lun = 0;
3667		query_disk.UnMapped = 0;
3668		sprintf(&query_disk.diskDeviceName[0], "%s%d",
3669			disk->ad_disk->d_name, disk->ad_disk->d_unit);
3670	}
3671	mtx_unlock(&sc->aac_container_lock);
3672
3673	error = copyout((caddr_t)&query_disk, uptr,
3674			sizeof(struct aac_query_disk));
3675
3676	return (error);
3677}
3678
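/*
 * Discover any SCSI pass-through buses behind the adapter: fetch the SCSI
 * method id, ask for the bus inventory with a GetBusInfo VM ioctl, and add
 * an "aacp" child device for each valid bus found.
 */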
3679static void
3680aac_get_bus_info(struct aac_softc *sc)
3681{
3682	struct aac_fib *fib;
3683	struct aac_ctcfg *c_cmd;
3684	struct aac_ctcfg_resp *c_resp;
3685	struct aac_vmioctl *vmi;
3686	struct aac_vmi_businf_resp *vmi_resp;
3687	struct aac_getbusinf businfo;
3688	struct aac_sim *caminf;
3689	device_t child;
3690	int i, found, error;
3691
3692	mtx_lock(&sc->aac_io_lock);
3693	aac_alloc_sync_fib(sc, &fib);
3694	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3695	bzero(c_cmd, sizeof(struct aac_ctcfg));
3696
3697	c_cmd->Command = VM_ContainerConfig;
3698	c_cmd->cmd = CT_GET_SCSI_METHOD;
3699	c_cmd->param = 0;
3700
3701	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3702	    sizeof(struct aac_ctcfg));
3703	if (error) {
3704		device_printf(sc->aac_dev, "Error %d sending "
3705		    "VM_ContainerConfig command\n", error);
3706		aac_release_sync_fib(sc);
3707		mtx_unlock(&sc->aac_io_lock);
3708		return;
3709	}
3710
3711	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3712	if (c_resp->Status != ST_OK) {
3713		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3714		    c_resp->Status);
3715		aac_release_sync_fib(sc);
3716		mtx_unlock(&sc->aac_io_lock);
3717		return;
3718	}
3719
3720	sc->scsi_method_id = c_resp->param;
3721
3722	vmi = (struct aac_vmioctl *)&fib->data[0];
3723	bzero(vmi, sizeof(struct aac_vmioctl));
3724
3725	vmi->Command = VM_Ioctl;
3726	vmi->ObjType = FT_DRIVE;
3727	vmi->MethId = sc->scsi_method_id;
3728	vmi->ObjId = 0;
3729	vmi->IoctlCmd = GetBusInfo;
3730
3731	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3732	    sizeof(struct aac_vmi_businf_resp));
3733	if (error) {
3734		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3735		    error);
3736		aac_release_sync_fib(sc);
3737		mtx_unlock(&sc->aac_io_lock);
3738		return;
3739	}
3740
3741	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3742	if (vmi_resp->Status != ST_OK) {
3743		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3744		    vmi_resp->Status);
3745		aac_release_sync_fib(sc);
3746		mtx_unlock(&sc->aac_io_lock);
3747		return;
3748	}
3749
3750	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3751	aac_release_sync_fib(sc);
3752	mtx_unlock(&sc->aac_io_lock);
3753
3754	found = 0;
3755	for (i = 0; i < businfo.BusCount; i++) {
3756		if (businfo.BusValid[i] != AAC_BUS_VALID)
3757			continue;
3758
		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3760		    M_AACBUF, M_NOWAIT | M_ZERO);
3761		if (caminf == NULL) {
3762			device_printf(sc->aac_dev,
3763			    "No memory to add passthrough bus %d\n", i);
3764			break;
		}
3766
3767		child = device_add_child(sc->aac_dev, "aacp", -1);
3768		if (child == NULL) {
3769			device_printf(sc->aac_dev,
3770			    "device_add_child failed for passthrough bus %d\n",
3771			    i);
3772			free(caminf, M_AACBUF);
3773			break;
3774		}
3775
3776		caminf->TargetsPerBus = businfo.TargetsPerBus;
3777		caminf->BusNumber = i;
3778		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3779		caminf->aac_sc = sc;
3780		caminf->sim_dev = child;
3781
3782		device_set_ivars(child, caminf);
3783		device_set_desc(child, "SCSI Passthrough Bus");
3784		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3785
3786		found = 1;
3787	}
3788
3789	if (found)
3790		bus_generic_attach(sc->aac_dev);
3791}
3792