1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2001 Scott Long
4 * Copyright (c) 2000 BSDi
5 * Copyright (c) 2001-2010 Adaptec, Inc.
6 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34/*
35 * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
36 */
37#define AAC_DRIVERNAME			"aacraid"
38
39#include "opt_aacraid.h"
40
41/* #include <stddef.h> */
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/malloc.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/sysctl.h>
48#include <sys/poll.h>
49#include <sys/ioccom.h>
50
51#include <sys/bus.h>
52#include <sys/conf.h>
53#include <sys/signalvar.h>
54#include <sys/time.h>
55#include <sys/eventhandler.h>
56#include <sys/rman.h>
57
58#include <machine/bus.h>
59#include <sys/bus_dma.h>
60#include <machine/resource.h>
61
62#include <dev/pci/pcireg.h>
63#include <dev/pci/pcivar.h>
64
65#include <dev/aacraid/aacraid_reg.h>
66#include <sys/aac_ioctl.h>
67#include <dev/aacraid/aacraid_debug.h>
68#include <dev/aacraid/aacraid_var.h>
69
70#ifndef FILTER_HANDLED
71#define FILTER_HANDLED	0x02
72#endif
73
74static void	aac_add_container(struct aac_softc *sc,
75				  struct aac_mntinforesp *mir, int f,
76				  u_int32_t uid);
77static void	aac_get_bus_info(struct aac_softc *sc);
78static void	aac_container_bus(struct aac_softc *sc);
79static void	aac_daemon(void *arg);
80static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
81							  int pages, int nseg, int nseg_new);
82
83/* Command Processing */
84static void	aac_timeout(struct aac_softc *sc);
85static void	aac_command_thread(struct aac_softc *sc);
86static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
87				     u_int32_t xferstate, struct aac_fib *fib,
88				     u_int16_t datasize);
89/* Command Buffer Management */
90static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
91				       int nseg, int error);
92static int	aac_alloc_commands(struct aac_softc *sc);
93static void	aac_free_commands(struct aac_softc *sc);
94static void	aac_unmap_command(struct aac_command *cm);
95
96/* Hardware Interface */
97static int	aac_alloc(struct aac_softc *sc);
98static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
99			       int error);
100static int	aac_check_firmware(struct aac_softc *sc);
101static void	aac_define_int_mode(struct aac_softc *sc);
102static int	aac_init(struct aac_softc *sc);
103static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
104static int	aac_setup_intr(struct aac_softc *sc);
105static int	aac_check_config(struct aac_softc *sc);
106
107/* PMC SRC interface */
108static int	aac_src_get_fwstatus(struct aac_softc *sc);
109static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
110static int	aac_src_get_istatus(struct aac_softc *sc);
111static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
112static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
113				    u_int32_t arg0, u_int32_t arg1,
114				    u_int32_t arg2, u_int32_t arg3);
115static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
116static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
117static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
118static int aac_src_get_outb_queue(struct aac_softc *sc);
119static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
120
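/*
 * Dispatch table for the PMC SRC hardware interface; the entries follow the
 * field order of struct aac_interface.
 */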
121struct aac_interface aacraid_src_interface = {
122	aac_src_get_fwstatus,
123	aac_src_qnotify,
124	aac_src_get_istatus,
125	aac_src_clear_istatus,
126	aac_src_set_mailbox,
127	aac_src_get_mailbox,
128	aac_src_access_devreg,
129	aac_src_send_command,
130	aac_src_get_outb_queue,
131	aac_src_set_outb_queue
132};
133
134/* PMC SRCv interface */
135static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
136				    u_int32_t arg0, u_int32_t arg1,
137				    u_int32_t arg2, u_int32_t arg3);
138static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
139
140struct aac_interface aacraid_srcv_interface = {
141	aac_src_get_fwstatus,
142	aac_src_qnotify,
143	aac_src_get_istatus,
144	aac_src_clear_istatus,
145	aac_srcv_set_mailbox,
146	aac_srcv_get_mailbox,
147	aac_src_access_devreg,
148	aac_src_send_command,
149	aac_src_get_outb_queue,
150	aac_src_set_outb_queue
151};
152
153/* Debugging and Diagnostics */
154static struct aac_code_lookup aac_cpu_variant[] = {
155	{"i960JX",		CPUI960_JX},
156	{"i960CX",		CPUI960_CX},
157	{"i960HX",		CPUI960_HX},
158	{"i960RX",		CPUI960_RX},
159	{"i960 80303",		CPUI960_80303},
160	{"StrongARM SA110",	CPUARM_SA110},
161	{"PPC603e",		CPUPPC_603e},
162	{"XScale 80321",	CPU_XSCALE_80321},
163	{"MIPS 4KC",		CPU_MIPS_4KC},
164	{"MIPS 5KC",		CPU_MIPS_5KC},
165	{"Unknown StrongARM",	CPUARM_xxx},
166	{"Unknown PowerPC",	CPUPPC_xxx},
167	{NULL, 0},
168	{"Unknown processor",	0}
169};
170
171static struct aac_code_lookup aac_battery_platform[] = {
172	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
173	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
174	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
175	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
176	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
177	{NULL, 0},
178	{"unknown battery platform",		0}
179};
180static void	aac_describe_controller(struct aac_softc *sc);
181static char	*aac_describe_code(struct aac_code_lookup *table,
182				   u_int32_t code);
183
184/* Management Interface */
185static d_open_t		aac_open;
186static d_ioctl_t	aac_ioctl;
187static d_poll_t		aac_poll;
188#if __FreeBSD_version >= 702000
189static void		aac_cdevpriv_dtor(void *arg);
190#else
191static d_close_t	aac_close;
192#endif
193static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196static void	aac_request_aif(struct aac_softc *sc);
197static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
198static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
199static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
200static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201static int	aac_return_aif(struct aac_softc *sc,
202			       struct aac_fib_context *ctx, caddr_t uptr);
203static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206static void	aac_ioctl_event(struct aac_softc *sc,
207				struct aac_event *event, void *arg);
208static int	aac_reset_adapter(struct aac_softc *sc);
209static int	aac_get_container_info(struct aac_softc *sc,
210				       struct aac_fib *fib, int cid,
211				       struct aac_mntinforesp *mir,
212				       u_int32_t *uid);
213static u_int32_t
214	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
215
216static struct cdevsw aacraid_cdevsw = {
217	.d_version =	D_VERSION,
218	.d_flags =	D_NEEDGIANT,
219	.d_open =	aac_open,
220#if __FreeBSD_version < 702000
221	.d_close =	aac_close,
222#endif
223	.d_ioctl =	aac_ioctl,
224	.d_poll =	aac_poll,
225	.d_name =	"aacraid",
226};
227
228MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
229
230/* sysctl node */
231SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
232
233/*
234 * Device Interface
235 */
236
237/*
238 * Initialize the controller and softc
239 */
240int
241aacraid_attach(struct aac_softc *sc)
242{
243	int error, unit;
244	struct aac_fib *fib;
245	struct aac_mntinforesp mir;
246	int count = 0, i = 0;
247	u_int32_t uid;
248
249	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
250	sc->hint_flags = device_get_flags(sc->aac_dev);
251	/*
252	 * Initialize per-controller queues.
253	 */
254	aac_initq_free(sc);
255	aac_initq_ready(sc);
256	aac_initq_busy(sc);
257
258	/* mark controller as suspended until we get ourselves organised */
259	sc->aac_state |= AAC_STATE_SUSPEND;
260
261	/*
262	 * Check that the firmware on the card is supported.
263	 */
264	sc->msi_enabled = FALSE;
265	if ((error = aac_check_firmware(sc)) != 0)
266		return(error);
267
268	/*
269	 * Initialize locks
270	 */
271	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
272	TAILQ_INIT(&sc->aac_container_tqh);
273	TAILQ_INIT(&sc->aac_ev_cmfree);
274
275#if __FreeBSD_version >= 800000
276	/* Initialize the clock daemon callout. */
277	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
278#endif
279	/*
280	 * Initialize the adapter.
281	 */
282	if ((error = aac_alloc(sc)) != 0)
283		return(error);
284	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
285		aac_define_int_mode(sc);
286		if ((error = aac_init(sc)) != 0)
287			return(error);
288	}
289
290	/*
291	 * Allocate and connect our interrupt.
292	 */
293	if ((error = aac_setup_intr(sc)) != 0)
294		return(error);
295
296	/*
297	 * Print a little information about the controller.
298	 */
299	aac_describe_controller(sc);
300
301	/*
302	 * Make the control device.
303	 */
304	unit = device_get_unit(sc->aac_dev);
305	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
306				 0640, "aacraid%d", unit);
307	sc->aac_dev_t->si_drv1 = sc;
308
309	/* Create the AIF thread */
310	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
311		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
312		panic("Could not create AIF thread");
313
314	/* Register the shutdown method to only be called post-dump */
315	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
316	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
317		device_printf(sc->aac_dev,
318			      "shutdown event registration failed\n");
319
320	/* Find containers */
321	mtx_lock(&sc->aac_io_lock);
322	aac_alloc_sync_fib(sc, &fib);
323	/* loop over possible containers */
324	do {
325		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
326			continue;
327		if (i == 0)
328			count = mir.MntRespCount;
329		aac_add_container(sc, &mir, 0, uid);
330		i++;
331	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
332	aac_release_sync_fib(sc);
333	mtx_unlock(&sc->aac_io_lock);
334
335	/* Register with CAM for the containers */
336	TAILQ_INIT(&sc->aac_sim_tqh);
337	aac_container_bus(sc);
338	/* Register with CAM for the non-DASD devices */
339	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
340		aac_get_bus_info(sc);
341
342	/* poke the bus to actually attach the child devices */
343	bus_generic_attach(sc->aac_dev);
344
345	/* mark the controller up */
346	sc->aac_state &= ~AAC_STATE_SUSPEND;
347
348	/* enable interrupts now */
349	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
350
351#if __FreeBSD_version >= 800000
352	mtx_lock(&sc->aac_io_lock);
353	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
354	mtx_unlock(&sc->aac_io_lock);
355#else
356	{
357		struct timeval tv;
358		tv.tv_sec = 60;
359		tv.tv_usec = 0;
360		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
361	}
362#endif
363
364	return(0);
365}
366
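/*
 * Periodic housekeeping: send the current host time to the controller in a
 * SendHostTime FIB, then reschedule itself (roughly every 30 minutes).
 */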
367static void
368aac_daemon(void *arg)
369{
370	struct aac_softc *sc;
371	struct timeval tv;
372	struct aac_command *cm;
373	struct aac_fib *fib;
374
375	sc = arg;
376	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
377
378#if __FreeBSD_version >= 800000
379	mtx_assert(&sc->aac_io_lock, MA_OWNED);
380	if (callout_pending(&sc->aac_daemontime) ||
381	    callout_active(&sc->aac_daemontime) == 0)
382		return;
383#else
384	mtx_lock(&sc->aac_io_lock);
385#endif
386	getmicrotime(&tv);
387
388	if (!aacraid_alloc_command(sc, &cm)) {
389		fib = cm->cm_fib;
390		cm->cm_timestamp = time_uptime;
391		cm->cm_datalen = 0;
392		cm->cm_flags |= AAC_CMD_WAIT;
393
394		fib->Header.Size =
395			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
396		fib->Header.XferState =
397			AAC_FIBSTATE_HOSTOWNED   |
398			AAC_FIBSTATE_INITIALISED |
399			AAC_FIBSTATE_EMPTY	 |
400			AAC_FIBSTATE_FROMHOST	 |
401			AAC_FIBSTATE_REXPECTED   |
402			AAC_FIBSTATE_NORM	 |
403			AAC_FIBSTATE_ASYNC	 |
404			AAC_FIBSTATE_FAST_RESPONSE;
405		fib->Header.Command = SendHostTime;
406		*(uint32_t *)fib->data = tv.tv_sec;
407
408		aacraid_map_command_sg(cm, NULL, 0, 0);
409		aacraid_release_command(cm);
410	}
411
412#if __FreeBSD_version >= 800000
413	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
414#else
415	mtx_unlock(&sc->aac_io_lock);
416	tv.tv_sec = 30 * 60;
417	tv.tv_usec = 0;
418	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
419#endif
420}
421
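/*
 * Queue an event for later delivery; only command-free events are supported,
 * and they are dispatched from aacraid_release_command().
 */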
422void
423aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
424{
425
426	switch (event->ev_type & AAC_EVENT_MASK) {
427	case AAC_EVENT_CMFREE:
428		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
429		break;
430	default:
		device_printf(sc->aac_dev, "aacraid_add_event: unknown event %d\n",
432		    event->ev_type);
433		break;
434	}
435
436	return;
437}
438
439/*
440 * Request information of container #cid
441 */
442static int
443aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
444		       struct aac_mntinforesp *mir, u_int32_t *uid)
445{
446	struct aac_command *cm;
447	struct aac_fib *fib;
448	struct aac_mntinfo *mi;
449	struct aac_cnt_config *ccfg;
450	int rval;
451
452	if (sync_fib == NULL) {
453		if (aacraid_alloc_command(sc, &cm)) {
454			device_printf(sc->aac_dev,
455				"Warning, no free command available\n");
456			return (-1);
457		}
458		fib = cm->cm_fib;
459	} else {
460		fib = sync_fib;
461	}
462
463	mi = (struct aac_mntinfo *)&fib->data[0];
	/* pick the right NameServe variant: variable block size (4KB)? 64-bit LBA? */
465	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
466		mi->Command = VM_NameServeAllBlk;
467	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
468		mi->Command = VM_NameServe64;
469	else
470		mi->Command = VM_NameServe;
471	mi->MntType = FT_FILESYS;
472	mi->MntCount = cid;
473
474	if (sync_fib) {
475		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
476			 sizeof(struct aac_mntinfo))) {
477			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
478			return (-1);
479		}
480	} else {
481		cm->cm_timestamp = time_uptime;
482		cm->cm_datalen = 0;
483
484		fib->Header.Size =
485			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
486		fib->Header.XferState =
487			AAC_FIBSTATE_HOSTOWNED   |
488			AAC_FIBSTATE_INITIALISED |
489			AAC_FIBSTATE_EMPTY	 |
490			AAC_FIBSTATE_FROMHOST	 |
491			AAC_FIBSTATE_REXPECTED   |
492			AAC_FIBSTATE_NORM	 |
493			AAC_FIBSTATE_ASYNC	 |
494			AAC_FIBSTATE_FAST_RESPONSE;
495		fib->Header.Command = ContainerCommand;
496		if (aacraid_wait_command(cm) != 0) {
497			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
498			aacraid_release_command(cm);
499			return (-1);
500		}
501	}
502	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
503
	/* determine the container's unique identifier (UID); default to the container ID */
505	*uid = cid;
506	if (mir->MntTable[0].VolType != CT_NONE &&
507		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
508		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
509			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
510			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
511		}
512		ccfg = (struct aac_cnt_config *)&fib->data[0];
513		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
514		ccfg->Command = VM_ContainerConfig;
515		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
516		ccfg->CTCommand.param[0] = cid;
517
518		if (sync_fib) {
519			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
520				sizeof(struct aac_cnt_config));
521			if (rval == 0 && ccfg->Command == ST_OK &&
522				ccfg->CTCommand.param[0] == CT_OK &&
523				mir->MntTable[0].VolType != CT_PASSTHRU)
524				*uid = ccfg->CTCommand.param[1];
525		} else {
526			fib->Header.Size =
527				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
528			fib->Header.XferState =
529				AAC_FIBSTATE_HOSTOWNED   |
530				AAC_FIBSTATE_INITIALISED |
531				AAC_FIBSTATE_EMPTY	 |
532				AAC_FIBSTATE_FROMHOST	 |
533				AAC_FIBSTATE_REXPECTED   |
534				AAC_FIBSTATE_NORM	 |
535				AAC_FIBSTATE_ASYNC	 |
536				AAC_FIBSTATE_FAST_RESPONSE;
537			fib->Header.Command = ContainerCommand;
538			rval = aacraid_wait_command(cm);
539			if (rval == 0 && ccfg->Command == ST_OK &&
540				ccfg->CTCommand.param[0] == CT_OK &&
541				mir->MntTable[0].VolType != CT_PASSTHRU)
542				*uid = ccfg->CTCommand.param[1];
543			aacraid_release_command(cm);
544		}
545	}
546
547	return (0);
548}
549
550/*
551 * Create a device to represent a new container
552 */
553static void
554aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
555		  u_int32_t uid)
556{
557	struct aac_container *co;
558
559	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
560
561	/*
562	 * Check container volume type for validity.  Note that many of
563	 * the possible types may never show up.
564	 */
565	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
566		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
567		       M_NOWAIT | M_ZERO);
568		if (co == NULL) {
569			panic("Out of memory?!");
570		}
571
572		co->co_found = f;
573		bcopy(&mir->MntTable[0], &co->co_mntobj,
574		      sizeof(struct aac_mntobj));
575		co->co_uid = uid;
576		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
577	}
578}
579
580/*
581 * Allocate resources associated with (sc)
582 */
583static int
584aac_alloc(struct aac_softc *sc)
585{
586	bus_size_t maxsize;
587
588	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
589
590	/*
591	 * Create DMA tag for mapping buffers into controller-addressable space.
592	 */
593	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
594			       1, 0, 			/* algnmnt, boundary */
595			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
596			       BUS_SPACE_MAXADDR :
597			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
598			       BUS_SPACE_MAXADDR, 	/* highaddr */
599			       NULL, NULL, 		/* filter, filterarg */
600			       MAXBSIZE,		/* maxsize */
601			       sc->aac_sg_tablesize,	/* nsegments */
602			       MAXBSIZE,		/* maxsegsize */
603			       BUS_DMA_ALLOCNOW,	/* flags */
604			       busdma_lock_mutex,	/* lockfunc */
605			       &sc->aac_io_lock,	/* lockfuncarg */
606			       &sc->aac_buffer_dmat)) {
607		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
608		return (ENOMEM);
609	}
610
611	/*
 * Create DMA tag for mapping FIBs into controller-addressable space.
613	 */
614	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
615		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
616			sizeof(struct aac_fib_xporthdr) + 31);
617	else
618		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
619	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
620			       1, 0, 			/* algnmnt, boundary */
621			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
622			       BUS_SPACE_MAXADDR_32BIT :
623			       0x7fffffff,		/* lowaddr */
624			       BUS_SPACE_MAXADDR, 	/* highaddr */
625			       NULL, NULL, 		/* filter, filterarg */
626			       maxsize,  		/* maxsize */
627			       1,			/* nsegments */
			       maxsize,			/* maxsegsize */
629			       0,			/* flags */
630			       NULL, NULL,		/* No locking needed */
631			       &sc->aac_fib_dmat)) {
632		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
633		return (ENOMEM);
634	}
635
636	/*
637	 * Create DMA tag for the common structure and allocate it.
638	 */
639	maxsize = sizeof(struct aac_common);
640	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
641	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
642			       1, 0,			/* algnmnt, boundary */
643			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
644			       BUS_SPACE_MAXADDR_32BIT :
645			       0x7fffffff,		/* lowaddr */
646			       BUS_SPACE_MAXADDR, 	/* highaddr */
647			       NULL, NULL, 		/* filter, filterarg */
648			       maxsize, 		/* maxsize */
649			       1,			/* nsegments */
650			       maxsize,			/* maxsegsize */
651			       0,			/* flags */
652			       NULL, NULL,		/* No locking needed */
653			       &sc->aac_common_dmat)) {
654		device_printf(sc->aac_dev,
655			      "can't allocate common structure DMA tag\n");
656		return (ENOMEM);
657	}
658	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
659			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
660		device_printf(sc->aac_dev, "can't allocate common structure\n");
661		return (ENOMEM);
662	}
663
664	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
665			sc->aac_common, maxsize,
666			aac_common_map, sc, 0);
667	bzero(sc->aac_common, maxsize);
668
669	/* Allocate some FIBs and associated command structs */
670	TAILQ_INIT(&sc->aac_fibmap_tqh);
671	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
672				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
673	mtx_lock(&sc->aac_io_lock);
674	while (sc->total_fibs < sc->aac_max_fibs) {
675		if (aac_alloc_commands(sc) != 0)
676			break;
677	}
678	mtx_unlock(&sc->aac_io_lock);
679	if (sc->total_fibs == 0)
680		return (ENOMEM);
681
682	return (0);
683}
684
685/*
686 * Free all of the resources associated with (sc)
687 *
688 * Should not be called if the controller is active.
689 */
690void
691aacraid_free(struct aac_softc *sc)
692{
693	int i;
694
695	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
696
697	/* remove the control device */
698	if (sc->aac_dev_t != NULL)
699		destroy_dev(sc->aac_dev_t);
700
701	/* throw away any FIB buffers, discard the FIB DMA tag */
702	aac_free_commands(sc);
703	if (sc->aac_fib_dmat)
704		bus_dma_tag_destroy(sc->aac_fib_dmat);
705
706	free(sc->aac_commands, M_AACRAIDBUF);
707
708	/* destroy the common area */
709	if (sc->aac_common) {
710		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
711		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
712				sc->aac_common_dmamap);
713	}
714	if (sc->aac_common_dmat)
715		bus_dma_tag_destroy(sc->aac_common_dmat);
716
717	/* disconnect the interrupt handler */
718	for (i = 0; i < AAC_MAX_MSIX; ++i) {
719		if (sc->aac_intr[i])
720			bus_teardown_intr(sc->aac_dev,
721				sc->aac_irq[i], sc->aac_intr[i]);
722		if (sc->aac_irq[i])
723			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
724				sc->aac_irq_rid[i], sc->aac_irq[i]);
725		else
726			break;
727	}
728	if (sc->msi_enabled)
729		pci_release_msi(sc->aac_dev);
730
731	/* destroy data-transfer DMA tag */
732	if (sc->aac_buffer_dmat)
733		bus_dma_tag_destroy(sc->aac_buffer_dmat);
734
735	/* destroy the parent DMA tag */
736	if (sc->aac_parent_dmat)
737		bus_dma_tag_destroy(sc->aac_parent_dmat);
738
739	/* release the register window mapping */
740	if (sc->aac_regs_res0 != NULL)
741		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
742				     sc->aac_regs_rid0, sc->aac_regs_res0);
743	if (sc->aac_regs_res1 != NULL)
744		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745				     sc->aac_regs_rid1, sc->aac_regs_res1);
746}
747
748/*
749 * Disconnect from the controller completely, in preparation for unload.
750 */
751int
752aacraid_detach(device_t dev)
753{
754	struct aac_softc *sc;
755	struct aac_container *co;
756	struct aac_sim	*sim;
757	int error;
758
759	sc = device_get_softc(dev);
760	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
761
762#if __FreeBSD_version >= 800000
763	callout_drain(&sc->aac_daemontime);
764#else
765	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
766#endif
767	/* Remove the child containers */
768	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
769		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
770		free(co, M_AACRAIDBUF);
771	}
772
773	/* Remove the CAM SIMs */
774	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
775		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
776		error = device_delete_child(dev, sim->sim_dev);
777		if (error)
778			return (error);
779		free(sim, M_AACRAIDBUF);
780	}
781
782	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
783		sc->aifflags |= AAC_AIFFLAGS_EXIT;
784		wakeup(sc->aifthread);
785		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
786	}
787
788	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
789		panic("Cannot shutdown AIF thread");
790
791	if ((error = aacraid_shutdown(dev)))
792		return(error);
793
794	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
795
796	aacraid_free(sc);
797
798	mtx_destroy(&sc->aac_io_lock);
799
800	return(0);
801}
802
803/*
804 * Bring the controller down to a dormant state and detach all child devices.
805 *
806 * This function is called before detach or system shutdown.
807 *
808 * Note that we can assume that the bioq on the controller is empty, as we won't
809 * allow shutdown if any device is open.
810 */
811int
812aacraid_shutdown(device_t dev)
813{
814	struct aac_softc *sc;
815	struct aac_fib *fib;
816	struct aac_close_command *cc;
817
818	sc = device_get_softc(dev);
819	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
820
821	sc->aac_state |= AAC_STATE_SUSPEND;
822
823	/*
824	 * Send a Container shutdown followed by a HostShutdown FIB to the
825	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O has already completed.
827	 */
828	device_printf(sc->aac_dev, "shutting down controller...");
829
830	mtx_lock(&sc->aac_io_lock);
831	aac_alloc_sync_fib(sc, &fib);
832	cc = (struct aac_close_command *)&fib->data[0];
833
834	bzero(cc, sizeof(struct aac_close_command));
835	cc->Command = VM_CloseAll;
836	cc->ContainerId = 0xfffffffe;
837	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
838	    sizeof(struct aac_close_command)))
839		printf("FAILED.\n");
840	else
841		printf("done\n");
842
843	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
844	aac_release_sync_fib(sc);
845	mtx_unlock(&sc->aac_io_lock);
846
847	return(0);
848}
849
850/*
851 * Bring the controller to a quiescent state, ready for system suspend.
852 */
853int
854aacraid_suspend(device_t dev)
855{
856	struct aac_softc *sc;
857
858	sc = device_get_softc(dev);
859
860	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
861	sc->aac_state |= AAC_STATE_SUSPEND;
862
863	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
864	return(0);
865}
866
867/*
868 * Bring the controller back to a state ready for operation.
869 */
870int
871aacraid_resume(device_t dev)
872{
873	struct aac_softc *sc;
874
875	sc = device_get_softc(dev);
876
877	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
878	sc->aac_state &= ~AAC_STATE_SUSPEND;
879	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
880	return(0);
881}
882
883/*
 * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
885 */
886void
887aacraid_new_intr_type1(void *arg)
888{
889	struct aac_msix_ctx *ctx;
890	struct aac_softc *sc;
891	int vector_no;
892	struct aac_command *cm;
893	struct aac_fib *fib;
894	u_int32_t bellbits, bellbits_shifted, index, handle;
895	int isFastResponse, isAif, noMoreAif, mode;
896
897	ctx = (struct aac_msix_ctx *)arg;
898	sc = ctx->sc;
899	vector_no = ctx->vector_no;
900
901	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
902	mtx_lock(&sc->aac_io_lock);
903
904	if (sc->msi_enabled) {
905		mode = AAC_INT_MODE_MSI;
906		if (vector_no == 0) {
907			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
908			if (bellbits & 0x40000)
909				mode |= AAC_INT_MODE_AIF;
910			else if (bellbits & 0x1000)
911				mode |= AAC_INT_MODE_SYNC;
912		}
913	} else {
914		mode = AAC_INT_MODE_INTX;
915		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
916		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
917			bellbits = AAC_DB_RESPONSE_SENT_NS;
918			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
919		} else {
920			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
921			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922			if (bellbits_shifted & AAC_DB_AIF_PENDING)
923				mode |= AAC_INT_MODE_AIF;
924			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
925				mode |= AAC_INT_MODE_SYNC;
926		}
927		/* ODR readback, Prep #238630 */
928		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
929	}
930
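	/* a synchronous FIB has completed; wake up or call back its originator */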
931	if (mode & AAC_INT_MODE_SYNC) {
932		if (sc->aac_sync_cm) {
933			cm = sc->aac_sync_cm;
934			cm->cm_flags |= AAC_CMD_COMPLETED;
935			/* is there a completion handler? */
936			if (cm->cm_complete != NULL) {
937				cm->cm_complete(cm);
938			} else {
939				/* assume that someone is sleeping on this command */
940				wakeup(cm);
941			}
942			sc->flags &= ~AAC_QUEUE_FRZN;
943			sc->aac_sync_cm = NULL;
944		}
945		mode = 0;
946	}
947
948	if (mode & AAC_INT_MODE_AIF) {
949		if (mode & AAC_INT_MODE_INTX) {
950			aac_request_aif(sc);
951			mode = 0;
952		}
953	}
954
955	if (mode) {
956		/* handle async. status */
957		index = sc->aac_host_rrq_idx[vector_no];
958		for (;;) {
959			isFastResponse = isAif = noMoreAif = 0;
960			/* remove toggle bit (31) */
961			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
962			/* check fast response bit (30) */
963			if (handle & 0x40000000)
964				isFastResponse = 1;
965			/* check AIF bit (23) */
966			else if (handle & 0x00800000)
967				isAif = TRUE;
968			handle &= 0x0000ffff;
969			if (handle == 0)
970				break;
971
972			cm = sc->aac_commands + (handle - 1);
973			fib = cm->cm_fib;
974			sc->aac_rrq_outstanding[vector_no]--;
975			if (isAif) {
976				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
977				if (!noMoreAif)
978					aac_handle_aif(sc, fib);
979				aac_remove_busy(cm);
980				aacraid_release_command(cm);
981			} else {
982				if (isFastResponse) {
983					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
984					*((u_int32_t *)(fib->data)) = ST_OK;
985					cm->cm_flags |= AAC_CMD_FASTRESP;
986				}
987				aac_remove_busy(cm);
988				aac_unmap_command(cm);
989				cm->cm_flags |= AAC_CMD_COMPLETED;
990
991				/* is there a completion handler? */
992				if (cm->cm_complete != NULL) {
993					cm->cm_complete(cm);
994				} else {
995					/* assume that someone is sleeping on this command */
996					wakeup(cm);
997				}
998				sc->flags &= ~AAC_QUEUE_FRZN;
999			}
1000
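			/* retire this entry and wrap the index within this vector's slice */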
1001			sc->aac_common->ac_host_rrq[index++] = 0;
1002			if (index == (vector_no + 1) * sc->aac_vector_cap)
1003				index = vector_no * sc->aac_vector_cap;
1004			sc->aac_host_rrq_idx[vector_no] = index;
1005
1006			if ((isAif && !noMoreAif) || sc->aif_pending)
1007				aac_request_aif(sc);
1008		}
1009	}
1010
1011	if (mode & AAC_INT_MODE_AIF) {
1012		aac_request_aif(sc);
1013		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1014		mode = 0;
1015	}
1016
1017	/* see if we can start some more I/O */
1018	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1019		aacraid_startio(sc);
1020	mtx_unlock(&sc->aac_io_lock);
1021}
1022
1023/*
1024 * Handle notification of one or more FIBs coming from the controller.
1025 */
1026static void
1027aac_command_thread(struct aac_softc *sc)
1028{
1029	int retval;
1030
1031	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1032
1033	mtx_lock(&sc->aac_io_lock);
1034	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1035
1036	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1037
1038		retval = 0;
1039		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1040			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1041					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1042
1043		/*
1044		 * First see if any FIBs need to be allocated.  This needs
1045		 * to be called without the driver lock because contigmalloc
		 * will grab Giant, which would result in an LOR.
1047		 */
1048		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1049			aac_alloc_commands(sc);
1050			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1051			aacraid_startio(sc);
1052		}
1053
1054		/*
1055		 * While we're here, check to see if any commands are stuck.
1056		 * This is pretty low-priority, so it's ok if it doesn't
1057		 * always fire.
1058		 */
1059		if (retval == EWOULDBLOCK)
1060			aac_timeout(sc);
1061
1062		/* Check the hardware printf message buffer */
1063		if (sc->aac_common->ac_printf[0] != 0)
1064			aac_print_printf(sc);
1065	}
1066	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1067	mtx_unlock(&sc->aac_io_lock);
1068	wakeup(sc->aac_dev);
1069
1070	aac_kthread_exit(0);
1071}
1072
1073/*
1074 * Submit a command to the controller, return when it completes.
1075 * XXX This is very dangerous!  If the card has gone out to lunch, we could
1076 *     be stuck here forever.  At the same time, signals are not caught
1077 *     because there is a risk that a signal could wakeup the sleep before
1078 *     the card has a chance to complete the command.  Since there is no way
1079 *     to cancel a command that is in progress, we can't protect against the
1080 *     card completing a command late and spamming the command and data
1081 *     memory.  So, we are held hostage until the command completes.
1082 */
1083int
1084aacraid_wait_command(struct aac_command *cm)
1085{
1086	struct aac_softc *sc;
1087	int error;
1088
1089	sc = cm->cm_sc;
1090	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1091	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1092
1093	/* Put the command on the ready queue and get things going */
1094	aac_enqueue_ready(cm);
1095	aacraid_startio(sc);
1096	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1097	return(error);
1098}
1099
1100/*
 * Command Buffer Management
1102 */
1103
1104/*
1105 * Allocate a command.
1106 */
1107int
1108aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1109{
1110	struct aac_command *cm;
1111
1112	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1113
1114	if ((cm = aac_dequeue_free(sc)) == NULL) {
1115		if (sc->total_fibs < sc->aac_max_fibs) {
1116			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1117			wakeup(sc->aifthread);
1118		}
1119		return (EBUSY);
1120	}
1121
1122	*cmp = cm;
1123	return(0);
1124}
1125
1126/*
1127 * Release a command back to the freelist.
1128 */
1129void
1130aacraid_release_command(struct aac_command *cm)
1131{
1132	struct aac_event *event;
1133	struct aac_softc *sc;
1134
1135	sc = cm->cm_sc;
1136	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1137	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1138
1139	/* (re)initialize the command/FIB */
1140	cm->cm_sgtable = NULL;
1141	cm->cm_flags = 0;
1142	cm->cm_complete = NULL;
1143	cm->cm_ccb = NULL;
1144	cm->cm_passthr_dmat = 0;
1145	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1146	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1147	cm->cm_fib->Header.Unused = 0;
1148	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1149
1150	/*
1151	 * These are duplicated in aac_start to cover the case where an
1152	 * intermediate stage may have destroyed them.  They're left
1153	 * initialized here for debugging purposes only.
1154	 */
1155	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1156	cm->cm_fib->Header.Handle = 0;
1157
1158	aac_enqueue_free(cm);
1159
1160	/*
1161	 * Dequeue all events so that there's no risk of events getting
1162	 * stranded.
1163	 */
1164	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1165		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1166		event->ev_callback(sc, event, event->ev_arg);
1167	}
1168}
1169
1170/*
1171 * Map helper for command/FIB allocation.
1172 */
1173static void
1174aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1175{
1176	uint64_t	*fibphys;
1177
1178	fibphys = (uint64_t *)arg;
1179
1180	*fibphys = segs[0].ds_addr;
1181}
1182
1183/*
1184 * Allocate and initialize commands/FIBs for this adapter.
1185 */
1186static int
1187aac_alloc_commands(struct aac_softc *sc)
1188{
1189	struct aac_command *cm;
1190	struct aac_fibmap *fm;
1191	uint64_t fibphys;
1192	int i, error;
1193	u_int32_t maxsize;
1194
1195	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1196	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1197
1198	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1199		return (ENOMEM);
1200
1201	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1202	if (fm == NULL)
1203		return (ENOMEM);
1204
1205	mtx_unlock(&sc->aac_io_lock);
1206	/* allocate the FIBs in DMAable memory and load them */
1207	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1208			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1209		device_printf(sc->aac_dev,
1210			      "Not enough contiguous memory available.\n");
1211		free(fm, M_AACRAIDBUF);
1212		mtx_lock(&sc->aac_io_lock);
1213		return (ENOMEM);
1214	}
1215
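	/* reserve 31 bytes of slack per FIB so each one can be 32-byte aligned */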
1216	maxsize = sc->aac_max_fib_size + 31;
1217	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1218		maxsize += sizeof(struct aac_fib_xporthdr);
1219	/* Ignore errors since this doesn't bounce */
1220	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1221			      sc->aac_max_fibs_alloc * maxsize,
1222			      aac_map_command_helper, &fibphys, 0);
1223	mtx_lock(&sc->aac_io_lock);
1224
1225	/* initialize constant fields in the command structure */
1226	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1227	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1228		cm = sc->aac_commands + sc->total_fibs;
1229		fm->aac_commands = cm;
1230		cm->cm_sc = sc;
1231		cm->cm_fib = (struct aac_fib *)
1232			((u_int8_t *)fm->aac_fibs + i * maxsize);
1233		cm->cm_fibphys = fibphys + i * maxsize;
1234		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1235			u_int64_t fibphys_aligned;
1236			fibphys_aligned =
1237				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1238			cm->cm_fib = (struct aac_fib *)
1239				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1240			cm->cm_fibphys = fibphys_aligned;
1241		} else {
1242			u_int64_t fibphys_aligned;
1243			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1244			cm->cm_fib = (struct aac_fib *)
1245				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1246			cm->cm_fibphys = fibphys_aligned;
1247		}
1248		cm->cm_index = sc->total_fibs;
1249
1250		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1251					       &cm->cm_datamap)) != 0)
1252			break;
1253		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1254			aacraid_release_command(cm);
1255		sc->total_fibs++;
1256	}
1257
1258	if (i > 0) {
1259		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1260		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1261		return (0);
1262	}
1263
1264	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1265	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1266	free(fm, M_AACRAIDBUF);
1267	return (ENOMEM);
1268}
1269
1270/*
1271 * Free FIBs owned by this adapter.
1272 */
1273static void
1274aac_free_commands(struct aac_softc *sc)
1275{
1276	struct aac_fibmap *fm;
1277	struct aac_command *cm;
1278	int i;
1279
1280	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1281
1282	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1283
1284		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1285		/*
1286		 * We check against total_fibs to handle partially
1287		 * allocated blocks.
1288		 */
1289		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1290			cm = fm->aac_commands + i;
1291			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1292		}
1293		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1294		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1295		free(fm, M_AACRAIDBUF);
1296	}
1297}
1298
1299/*
1300 * Command-mapping helper function - populate this command's s/g table.
1301 */
1302void
1303aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1304{
1305	struct aac_softc *sc;
1306	struct aac_command *cm;
1307	struct aac_fib *fib;
1308	int i;
1309
1310	cm = (struct aac_command *)arg;
1311	sc = cm->cm_sc;
1312	fib = cm->cm_fib;
1313	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1314	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1315
1316	/* copy into the FIB */
1317	if (cm->cm_sgtable != NULL) {
1318		if (fib->Header.Command == RawIo2) {
1319			struct aac_raw_io2 *raw;
1320			struct aac_sge_ieee1212 *sg;
1321			u_int32_t min_size = PAGE_SIZE, cur_size;
1322			int conformable = TRUE;
1323
1324			raw = (struct aac_raw_io2 *)&fib->data[0];
1325			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1326			raw->sgeCnt = nseg;
1327
1328			for (i = 0; i < nseg; i++) {
1329				cur_size = segs[i].ds_len;
1330				sg[i].addrHigh = 0;
1331				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1332				sg[i].length = cur_size;
1333				sg[i].flags = 0;
1334				if (i == 0) {
1335					raw->sgeFirstSize = cur_size;
1336				} else if (i == 1) {
1337					raw->sgeNominalSize = cur_size;
1338					min_size = cur_size;
1339				} else if ((i+1) < nseg &&
1340					cur_size != raw->sgeNominalSize) {
1341					conformable = FALSE;
1342					if (cur_size < min_size)
1343						min_size = cur_size;
1344				}
1345			}
1346
1347			/* not conformable: evaluate required sg elements */
1348			if (!conformable) {
1349				int j, err_found, nseg_new = nseg;
1350				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1351					err_found = FALSE;
1352					nseg_new = 2;
1353					for (j = 1; j < nseg - 1; ++j) {
1354						if (sg[j].length % (i*PAGE_SIZE)) {
1355							err_found = TRUE;
1356							break;
1357						}
1358						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1359					}
1360					if (!err_found)
1361						break;
1362				}
1363				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1364					!(sc->hint_flags & 4))
1365					nseg = aac_convert_sgraw2(sc,
1366						raw, i, nseg, nseg_new);
1367			} else {
1368				raw->flags |= RIO2_SGL_CONFORMANT;
1369			}
1370
1371			/* update the FIB size for the s/g count */
1372			fib->Header.Size += nseg *
1373				sizeof(struct aac_sge_ieee1212);
1374
1375		} else if (fib->Header.Command == RawIo) {
1376			struct aac_sg_tableraw *sg;
1377			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1378			sg->SgCount = nseg;
1379			for (i = 0; i < nseg; i++) {
1380				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1381				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1382				sg->SgEntryRaw[i].Next = 0;
1383				sg->SgEntryRaw[i].Prev = 0;
1384				sg->SgEntryRaw[i].Flags = 0;
1385			}
1386			/* update the FIB size for the s/g count */
1387			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1388		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1389			struct aac_sg_table *sg;
1390			sg = cm->cm_sgtable;
1391			sg->SgCount = nseg;
1392			for (i = 0; i < nseg; i++) {
1393				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1394				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1395			}
1396			/* update the FIB size for the s/g count */
1397			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1398		} else {
1399			struct aac_sg_table64 *sg;
1400			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1401			sg->SgCount = nseg;
1402			for (i = 0; i < nseg; i++) {
1403				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1404				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1405			}
1406			/* update the FIB size for the s/g count */
1407			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1408		}
1409	}
1410
1411	/* Fix up the address values in the FIB.  Use the command array index
1412	 * instead of a pointer since these fields are only 32 bits.  Shift
1413	 * the SenderFibAddress over to make room for the fast response bit
1414	 * and for the AIF bit
1415	 */
1416	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1417	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1418
1419	/* save a pointer to the command for speedy reverse-lookup */
1420	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1421
1422	if (cm->cm_passthr_dmat == 0) {
1423		if (cm->cm_flags & AAC_CMD_DATAIN)
1424			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1425							BUS_DMASYNC_PREREAD);
1426		if (cm->cm_flags & AAC_CMD_DATAOUT)
1427			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1428							BUS_DMASYNC_PREWRITE);
1429	}
1430
1431	cm->cm_flags |= AAC_CMD_MAPPED;
1432
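	/*
	 * Hand the command to the adapter: sync mode and AAC_CMD_WAIT commands
	 * go out as synchronous SYNCFIB requests, everything else is pushed
	 * through the asynchronous send path, retried with a short delay if
	 * the adapter does not accept it.
	 */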
1433	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1434		u_int32_t wait = 0;
1435		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1436	} else if (cm->cm_flags & AAC_CMD_WAIT) {
1437		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1438	} else {
1439		int count = 10000000L;
1440		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1441			if (--count == 0) {
1442				aac_unmap_command(cm);
1443				sc->flags |= AAC_QUEUE_FRZN;
1444				aac_requeue_ready(cm);
1445			}
1446			DELAY(5);			/* wait 5 usec. */
1447		}
1448	}
1449}
1450
1451
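/*
 * Convert a non-conformant raw s/g list into a conformant one by splitting
 * the middle entries into equally sized chunks of (pages * PAGE_SIZE) bytes;
 * returns the resulting number of s/g elements.
 */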
1452static int
1453aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1454				   int pages, int nseg, int nseg_new)
1455{
1456	struct aac_sge_ieee1212 *sge;
1457	int i, j, pos;
1458	u_int32_t addr_low;
1459
1460	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1461		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1462	if (sge == NULL)
1463		return nseg;
1464
1465	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1466		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1467			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1468			sge[pos].addrLow = addr_low;
1469			sge[pos].addrHigh = raw->sge[i].addrHigh;
1470			if (addr_low < raw->sge[i].addrLow)
1471				sge[pos].addrHigh++;
1472			sge[pos].length = pages * PAGE_SIZE;
1473			sge[pos].flags = 0;
1474			pos++;
1475		}
1476	}
1477	sge[pos] = raw->sge[nseg-1];
1478	for (i = 1; i < nseg_new; ++i)
1479		raw->sge[i] = sge[i];
1480
1481	free(sge, M_AACRAIDBUF);
1482	raw->sgeCnt = nseg_new;
1483	raw->flags |= RIO2_SGL_CONFORMANT;
1484	raw->sgeNominalSize = pages * PAGE_SIZE;
1485	return nseg_new;
1486}
1487
1488
1489/*
1490 * Unmap a command from controller-visible space.
1491 */
1492static void
1493aac_unmap_command(struct aac_command *cm)
1494{
1495	struct aac_softc *sc;
1496
1497	sc = cm->cm_sc;
1498	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1499
1500	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1501		return;
1502
1503	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1504		if (cm->cm_flags & AAC_CMD_DATAIN)
1505			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1506					BUS_DMASYNC_POSTREAD);
1507		if (cm->cm_flags & AAC_CMD_DATAOUT)
1508			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1509					BUS_DMASYNC_POSTWRITE);
1510
1511		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1512	}
1513	cm->cm_flags &= ~AAC_CMD_MAPPED;
1514}
1515
1516/*
1517 * Hardware Interface
1518 */
1519
1520/*
 * Record the bus address of the common host/adapter communication area
 * (busdma load callback).
1522 */
1523static void
1524aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1525{
1526	struct aac_softc *sc;
1527
1528	sc = (struct aac_softc *)arg;
1529	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1530
1531	sc->aac_common_busaddr = segs[0].ds_addr;
1532}
1533
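/*
 * Wait for the firmware to become ready, verify that it is supported, and
 * read back its capabilities and preferred settings.
 */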
1534static int
1535aac_check_firmware(struct aac_softc *sc)
1536{
1537	u_int32_t code, major, minor, maxsize;
1538	u_int32_t options = 0, atu_size = 0, status, waitCount;
1539	time_t then;
1540
1541	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1542
1543	/* check if flash update is running */
1544	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1545		then = time_uptime;
1546		do {
1547			code = AAC_GET_FWSTATUS(sc);
1548			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1549				device_printf(sc->aac_dev,
1550						  "FATAL: controller not coming ready, "
1551						   "status %x\n", code);
1552				return(ENXIO);
1553			}
1554		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1555		/*
		 * Delay 10 seconds.  The firmware is performing a soft reset at
		 * this point, so do not read the scratch pad register yet.
1558		 */
1559		waitCount = 10 * 10000;
1560		while (waitCount) {
1561			DELAY(100);		/* delay 100 microseconds */
1562			waitCount--;
1563		}
1564	}
1565
1566	/*
1567	 * Wait for the adapter to come ready.
1568	 */
1569	then = time_uptime;
1570	do {
1571		code = AAC_GET_FWSTATUS(sc);
1572		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1573			device_printf(sc->aac_dev,
1574				      "FATAL: controller not coming ready, "
1575					   "status %x\n", code);
1576			return(ENXIO);
1577		}
1578	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1579
1580	/*
1581	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1582	 * firmware version 1.x are not compatible with this driver.
1583	 */
1584	if (sc->flags & AAC_FLAGS_PERC2QC) {
1585		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1586				     NULL, NULL)) {
1587			device_printf(sc->aac_dev,
1588				      "Error reading firmware version\n");
1589			return (EIO);
1590		}
1591
1592		/* These numbers are stored as ASCII! */
1593		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1594		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1595		if (major == 1) {
1596			device_printf(sc->aac_dev,
1597			    "Firmware version %d.%d is not supported.\n",
1598			    major, minor);
1599			return (EINVAL);
1600		}
1601	}
1602	/*
1603	 * Retrieve the capabilities/supported options word so we know what
1604	 * work-arounds to enable.  Some firmware revs don't support this
1605	 * command.
1606	 */
1607	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1608		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1609			device_printf(sc->aac_dev,
1610			     "RequestAdapterInfo failed\n");
1611			return (EIO);
1612		}
1613	} else {
1614		options = AAC_GET_MAILBOX(sc, 1);
1615		atu_size = AAC_GET_MAILBOX(sc, 2);
1616		sc->supported_options = options;
1617
1618		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1619		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1620			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1621		if (options & AAC_SUPPORTED_NONDASD)
1622			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1623		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1624			&& (sizeof(bus_addr_t) > 4)
1625			&& (sc->hint_flags & 0x1)) {
1626			device_printf(sc->aac_dev,
1627			    "Enabling 64-bit address support\n");
1628			sc->flags |= AAC_FLAGS_SG_64BIT;
1629		}
1630		if (sc->aac_if.aif_send_command) {
1631			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1632				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1633				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1634			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1635				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1636			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1637				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1638		}
1639		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1640			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1641	}
1642
1643	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1644		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1645		return (ENXIO);
1646	}
1647
1648	if (sc->hint_flags & 2) {
1649		device_printf(sc->aac_dev,
1650			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1651		sc->flags |= AAC_FLAGS_SYNC_MODE;
1652	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1653		device_printf(sc->aac_dev,
			"Async. mode not supported by the current driver, sync. mode enforced.\nPlease update the driver to get full performance.\n");
1655		sc->flags |= AAC_FLAGS_SYNC_MODE;
1656	}
1657
	/* Check for broken hardware that supports a lower number of commands */
1659	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1660
1661	/* Remap mem. resource, if required */
1662	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1663		bus_release_resource(
1664			sc->aac_dev, SYS_RES_MEMORY,
1665			sc->aac_regs_rid0, sc->aac_regs_res0);
1666		sc->aac_regs_res0 = bus_alloc_resource(
1667			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1668			0ul, ~0ul, atu_size, RF_ACTIVE);
1669		if (sc->aac_regs_res0 == NULL) {
1670			sc->aac_regs_res0 = bus_alloc_resource_any(
1671				sc->aac_dev, SYS_RES_MEMORY,
1672				&sc->aac_regs_rid0, RF_ACTIVE);
1673			if (sc->aac_regs_res0 == NULL) {
1674				device_printf(sc->aac_dev,
1675					"couldn't allocate register window\n");
1676				return (ENXIO);
1677			}
1678		}
1679		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1680		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1681	}
1682
1683	/* Read preferred settings */
1684	sc->aac_max_fib_size = sizeof(struct aac_fib);
1685	sc->aac_max_sectors = 128;				/* 64KB */
1686	sc->aac_max_aif = 1;
1687	if (sc->flags & AAC_FLAGS_SG_64BIT)
1688		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1689		 - sizeof(struct aac_blockwrite64))
1690		 / sizeof(struct aac_sg_entry64);
1691	else
1692		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693		 - sizeof(struct aac_blockwrite))
1694		 / sizeof(struct aac_sg_entry);
1695
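	/* override the defaults above with whatever the firmware reports via GETCOMMPREF */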
1696	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1697		options = AAC_GET_MAILBOX(sc, 1);
1698		sc->aac_max_fib_size = (options & 0xFFFF);
1699		sc->aac_max_sectors = (options >> 16) << 1;
1700		options = AAC_GET_MAILBOX(sc, 2);
1701		sc->aac_sg_tablesize = (options >> 16);
1702		options = AAC_GET_MAILBOX(sc, 3);
1703		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1704		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1705			sc->aac_max_fibs = (options & 0xFFFF);
1706		options = AAC_GET_MAILBOX(sc, 4);
1707		sc->aac_max_aif = (options & 0xFFFF);
1708		options = AAC_GET_MAILBOX(sc, 5);
		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1710	}
1711
1712	maxsize = sc->aac_max_fib_size + 31;
1713	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1714		maxsize += sizeof(struct aac_fib_xporthdr);
1715	if (maxsize > PAGE_SIZE) {
		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1717		maxsize = PAGE_SIZE;
1718	}
1719	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1720
1721	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1722		sc->flags |= AAC_FLAGS_RAW_IO;
1723		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1724	}
1725	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1726	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1727		sc->flags |= AAC_FLAGS_LBA_64BIT;
1728		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1729	}
1730
1731#ifdef AACRAID_DEBUG
1732	aacraid_get_fw_debug_buffer(sc);
1733#endif
1734	return (0);
1735}
1736
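/*
 * Build the init structure in the common area and hand it to the controller
 * with a synchronous INITSTRUCT command.
 */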
1737static int
1738aac_init(struct aac_softc *sc)
1739{
1740	struct aac_adapter_init	*ip;
1741	int i, error;
1742
1743	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1744
1745	/* reset rrq index */
1746	sc->aac_fibs_pushed_no = 0;
1747	for (i = 0; i < sc->aac_max_msix; i++)
1748		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1749
1750	/*
1751	 * Fill in the init structure.  This tells the adapter about the
1752	 * physical location of various important shared data structures.
1753	 */
1754	ip = &sc->aac_common->ac_init;
1755	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1756	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1757		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1758		sc->flags |= AAC_FLAGS_RAW_IO;
1759	}
1760	ip->NoOfMSIXVectors = sc->aac_max_msix;
1761
1762	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1763					 offsetof(struct aac_common, ac_fibs);
1764	ip->AdapterFibsVirtualAddress = 0;
1765	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1766	ip->AdapterFibAlign = sizeof(struct aac_fib);
1767
1768	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1769				  offsetof(struct aac_common, ac_printf);
1770	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1771
1772	/*
1773	 * The adapter assumes that pages are 4K in size, except on some
1774 	 * broken firmware versions that do the page->byte conversion twice,
1775	 * therefore 'assuming' that this value is in 16MB units (2^24).
1776	 * Round up since the granularity is so high.
1777	 */
1778	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1779	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1780		ip->HostPhysMemPages =
1781		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1782	}
1783	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1784
1785	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1786	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1787		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1788		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1789			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1790		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1791	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1792		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1793		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1794			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1795		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1796	}
1797	ip->MaxNumAif = sc->aac_max_aif;
1798	ip->HostRRQ_AddrLow =
1799		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1800	/* always 32-bit address */
1801	ip->HostRRQ_AddrHigh = 0;
1802
1803	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1804		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1805		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1806		device_printf(sc->aac_dev, "Power Management enabled\n");
1807	}
1808
1809	ip->MaxIoCommands = sc->aac_max_fibs;
1810	ip->MaxIoSize = sc->aac_max_sectors << 9;
1811	ip->MaxFibSize = sc->aac_max_fib_size;
1812
1813	/*
1814	 * Do controller-type-specific initialisation
1815	 */
1816	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1817
1818	/*
1819	 * Give the init structure to the controller.
1820	 */
1821	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1822			     sc->aac_common_busaddr +
1823			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1824			     NULL, NULL)) {
1825		device_printf(sc->aac_dev,
1826			      "error establishing init structure\n");
1827		error = EIO;
1828		goto out;
1829	}
1830
1831	/*
1832	 * Check configuration issues
1833	 */
1834	if ((error = aac_check_config(sc)) != 0)
1835		goto out;
1836
1837	error = 0;
1838out:
1839	return(error);
1840}
1841
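/*
 * Select the interrupt delivery mode: try MSI-X first, then fall back to MSI
 * and finally to legacy INTx, and split the FIB pool among the vectors that
 * were actually granted.
 */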
1842static void
1843aac_define_int_mode(struct aac_softc *sc)
1844{
1845	device_t dev;
1846	int cap, msi_count, error = 0;
1847	uint32_t val;
1848
1849	dev = sc->aac_dev;
1850
1851	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1852	if (sc->aac_max_msix == 0) {
1853		sc->aac_max_msix = 1;
1854		sc->aac_vector_cap = sc->aac_max_fibs;
1855		return;
1856	}
1857
1858	/* OS capability */
1859	msi_count = pci_msix_count(dev);
1860	if (msi_count > AAC_MAX_MSIX)
1861		msi_count = AAC_MAX_MSIX;
1862	if (msi_count > sc->aac_max_msix)
1863		msi_count = sc->aac_max_msix;
1864	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1865		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1866				   "will try MSI\n", msi_count, error);
1867		pci_release_msi(dev);
1868	} else {
1869		sc->msi_enabled = TRUE;
1870		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1871			msi_count);
1872	}
1873
1874	if (!sc->msi_enabled) {
1875		msi_count = 1;
1876		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1877			device_printf(dev, "alloc msi failed - err=%d; "
1878				           "will use INTx\n", error);
1879			pci_release_msi(dev);
1880		} else {
1881			sc->msi_enabled = TRUE;
1882			device_printf(dev, "using MSI interrupts\n");
1883		}
1884	}
1885
1886	if (sc->msi_enabled) {
1887		/* now read controller capability from PCI config. space */
1888		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1889		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1890		if (!(val & AAC_PCI_MSI_ENABLE)) {
1891			pci_release_msi(dev);
1892			sc->msi_enabled = FALSE;
1893		}
1894	}
1895
1896	if (!sc->msi_enabled) {
1897		device_printf(dev, "using legacy interrupts\n");
1898		sc->aac_max_msix = 1;
1899	} else {
1900		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1901		if (sc->aac_max_msix > msi_count)
1902			sc->aac_max_msix = msi_count;
1903	}
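	/* divide the FIB pool evenly among the interrupt vectors */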
1904	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1905
1906	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1908}
1909
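/*
 * Walk the PCI capability list and return the configuration-space offset of
 * the requested capability, or 0 if it is not present.
 */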
1910static int
1911aac_find_pci_capability(struct aac_softc *sc, int cap)
1912{
1913	device_t dev;
1914	uint32_t status;
1915	uint8_t ptr;
1916
1917	dev = sc->aac_dev;
1918
1919	status = pci_read_config(dev, PCIR_STATUS, 2);
1920	if (!(status & PCIM_STATUS_CAPPRESENT))
1921		return (0);
1922
1923	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1924	switch (status & PCIM_HDRTYPE) {
1925	case 0:
1926	case 1:
1927		ptr = PCIR_CAP_PTR;
1928		break;
1929	case 2:
1930		ptr = PCIR_CAP_PTR_2;
1931		break;
	default:
		return (0);
1935	}
1936	ptr = pci_read_config(dev, ptr, 1);
1937
1938	while (ptr != 0) {
1939		int next, val;
1940		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1941		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1942		if (val == cap)
1943			return (ptr);
1944		ptr = next;
1945	}
1946
1947	return (0);
1948}
1949
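/*
 * Allocate and hook up one interrupt handler per MSI-X vector, or a single
 * handler when running with MSI or legacy interrupts.
 */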
1950static int
1951aac_setup_intr(struct aac_softc *sc)
1952{
1953	int i, msi_count, rid;
1954	struct resource *res;
1955	void *tag;
1956
1957	msi_count = sc->aac_max_msix;
	rid = (sc->msi_enabled ? 1 : 0);
1959
1960	for (i = 0; i < msi_count; i++, rid++) {
1961		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1962			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1963			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1964			return (EINVAL);
1965		}
1966		sc->aac_irq_rid[i] = rid;
1967		sc->aac_irq[i] = res;
1968		if (aac_bus_setup_intr(sc->aac_dev, res,
1969			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1970			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1971			device_printf(sc->aac_dev, "can't set up interrupt\n");
1972			return (EINVAL);
1973		}
1974		sc->aac_msix[i].vector_no = i;
1975		sc->aac_msix[i].sc = sc;
1976		sc->aac_intr[i] = tag;
1977	}
1978
1979	return (0);
1980}
1981
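/*
 * Retrieve the adapter's configuration status and, if the firmware indicates
 * that an automatic commit is safe, commit the configuration.
 */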
1982static int
1983aac_check_config(struct aac_softc *sc)
1984{
1985	struct aac_fib *fib;
1986	struct aac_cnt_config *ccfg;
1987	struct aac_cf_status_hdr *cf_shdr;
1988	int rval;
1989
1990	mtx_lock(&sc->aac_io_lock);
1991	aac_alloc_sync_fib(sc, &fib);
1992
1993	ccfg = (struct aac_cnt_config *)&fib->data[0];
1994	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1995	ccfg->Command = VM_ContainerConfig;
1996	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
1997	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
1998
1999	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2000		sizeof (struct aac_cnt_config));
2001	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2002	if (rval == 0 && ccfg->Command == ST_OK &&
2003		ccfg->CTCommand.param[0] == CT_OK) {
2004		if (cf_shdr->action <= CFACT_PAUSE) {
2005			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2006			ccfg->Command = VM_ContainerConfig;
2007			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2008
2009			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2010				sizeof (struct aac_cnt_config));
2011			if (rval == 0 && ccfg->Command == ST_OK &&
2012				ccfg->CTCommand.param[0] == CT_OK) {
2013				/* successful completion */
2014				rval = 0;
2015			} else {
2016				/* auto commit aborted due to error(s) */
2017				rval = -2;
2018			}
2019		} else {
			/* auto commit aborted: the adapter indicated
			   configuration issues too dangerous to auto commit */
2022			rval = -3;
2023		}
2024	} else {
2025		/* error */
2026		rval = -1;
2027	}
2028
2029	aac_release_sync_fib(sc);
2030	mtx_unlock(&sc->aac_io_lock);
2031	return(rval);
2032}
2033
2034/*
2035 * Send a synchronous command to the controller and wait for a result.
2036 * Indicate if the controller completed the command with an error status.
2037 */
2038int
2039aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2040		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2041		 u_int32_t *sp, u_int32_t *r1)
2042{
2043	time_t then;
2044	u_int32_t status;
2045
2046	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2047
2048	/* populate the mailbox */
2049	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2050
2051	/* ensure the sync command doorbell flag is cleared */
2052	if (!sc->msi_enabled)
2053		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2054
2055	/* then set it to signal the adapter */
2056	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2057
2058	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2059		/* spin waiting for the command to complete */
2060		then = time_uptime;
2061		do {
2062			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2063				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2064				return(EIO);
2065			}
2066		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2067
2068		/* clear the completion flag */
2069		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2070
2071		/* get the command status */
2072		status = AAC_GET_MAILBOX(sc, 0);
2073		if (sp != NULL)
2074			*sp = status;
2075
2076		/* return parameter */
2077		if (r1 != NULL)
2078			*r1 = AAC_GET_MAILBOX(sc, 1);
2079
2080		if (status != AAC_SRB_STS_SUCCESS)
2081			return (-1);
2082	}
2083	return(0);
2084}
2085
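/*
 * Build a FIB in the shared sync FIB area and hand it to the controller
 * through the synchronous command interface, waiting for completion.
 */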
2086static int
2087aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2088		 struct aac_fib *fib, u_int16_t datasize)
2089{
2090	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2091	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2092
2093	if (datasize > AAC_FIB_DATASIZE)
2094		return(EINVAL);
2095
2096	/*
2097	 * Set up the sync FIB
2098	 */
2099	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2100				AAC_FIBSTATE_INITIALISED |
2101				AAC_FIBSTATE_EMPTY;
2102	fib->Header.XferState |= xferstate;
2103	fib->Header.Command = command;
2104	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2105	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2106	fib->Header.SenderSize = sizeof(struct aac_fib);
2107	fib->Header.SenderFibAddress = 0;	/* Not needed */
2108	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2109		offsetof(struct aac_common, ac_sync_fib);
2110
2111	/*
2112	 * Give the FIB to the controller, wait for a response.
2113	 */
2114	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2115		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2116		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2117		return(EIO);
2118	}
2119
2120	return (0);
2121}
2122
2123/*
2124 * Check for commands that have been outstanding for a suspiciously long time,
2125 * and complain about them.
2126 */
2127static void
2128aac_timeout(struct aac_softc *sc)
2129{
2130	struct aac_command *cm;
2131	time_t deadline;
2132	int timedout;
2133
2134	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2135	/*
	 * Traverse the busy command list and complain about late commands,
	 * but only once.
2138	 */
2139	timedout = 0;
2140	deadline = time_uptime - AAC_CMD_TIMEOUT;
2141	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2142		if (cm->cm_timestamp < deadline) {
2143			device_printf(sc->aac_dev,
2144				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2145				      cm, (int)(time_uptime-cm->cm_timestamp));
2146			AAC_PRINT_FIB(sc, cm->cm_fib);
2147			timedout++;
2148		}
2149	}
2150
2151	if (timedout)
2152		aac_reset_adapter(sc);
2153	aacraid_print_queues(sc);
2154}
2155
2156/*
2157 * Interface Function Vectors
2158 */
2159
2160/*
2161 * Read the current firmware status word.
2162 */
2163static int
2164aac_src_get_fwstatus(struct aac_softc *sc)
2165{
2166	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2167
2168	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2169}
2170
2171/*
2172 * Notify the controller of a change in a given queue
2173 */
2174static void
2175aac_src_qnotify(struct aac_softc *sc, int qbit)
2176{
2177	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2178
2179	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2180}
2181
2182/*
2183 * Get the interrupt reason bits
2184 */
2185static int
2186aac_src_get_istatus(struct aac_softc *sc)
2187{
2188	int val;
2189
2190	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2191
2192	if (sc->msi_enabled) {
2193		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2194		if (val & AAC_MSI_SYNC_STATUS)
2195			val = AAC_DB_SYNC_COMMAND;
2196		else
2197			val = 0;
2198	} else {
2199		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2200	}
2201	return(val);
2202}
2203
2204/*
2205 * Clear some interrupt reason bits
2206 */
2207static void
2208aac_src_clear_istatus(struct aac_softc *sc, int mask)
2209{
2210	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2211
2212	if (sc->msi_enabled) {
2213		if (mask == AAC_DB_SYNC_COMMAND)
2214			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2215	} else {
2216		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2217	}
2218}
2219
2220/*
2221 * Populate the mailbox and set the command word
2222 */
2223static void
2224aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2225		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2226{
2227	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2228
2229	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2230	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2231	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2232	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2233	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2234}
2235
2236static void
2237aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2238		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2239{
2240	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2241
2242	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2243	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2244	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2245	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2246	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2247}
2248
2249/*
2250 * Fetch the immediate command status word
2251 */
2252static int
2253aac_src_get_mailbox(struct aac_softc *sc, int mb)
2254{
2255	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2256
2257	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2258}
2259
2260static int
2261aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2262{
2263	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2264
2265	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2266}
2267
2268/*
2269 * Set/clear interrupt masks
2270 */
2271static void
2272aac_src_access_devreg(struct aac_softc *sc, int mode)
2273{
2274	u_int32_t val;
2275
2276	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2277
2278	switch (mode) {
2279	case AAC_ENABLE_INTERRUPT:
2280		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2281			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2282				           AAC_INT_ENABLE_TYPE1_INTX));
2283		break;
2284
2285	case AAC_DISABLE_INTERRUPT:
2286		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2287		break;
2288
2289	case AAC_ENABLE_MSIX:
2290		/* set bit 6 */
2291		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2292		val |= 0x40;
2293		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2294		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2295		/* unmask int. */
2296		val = PMC_ALL_INTERRUPT_BITS;
2297		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2298		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2299		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2300			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2301		break;
2302
2303	case AAC_DISABLE_MSIX:
2304		/* reset bit 6 */
2305		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2306		val &= ~0x40;
2307		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2308		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2309		break;
2310
2311	case AAC_CLEAR_AIF_BIT:
2312		/* set bit 5 */
2313		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2314		val |= 0x20;
2315		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2316		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2317		break;
2318
2319	case AAC_CLEAR_SYNC_BIT:
2320		/* set bit 4 */
2321		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2322		val |= 0x10;
2323		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2324		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2325		break;
2326
2327	case AAC_ENABLE_INTX:
2328		/* set bit 7 */
2329		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2330		val |= 0x80;
2331		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2332		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2333		/* unmask int. */
2334		val = PMC_ALL_INTERRUPT_BITS;
2335		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2336		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2337		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2338			val & (~(PMC_GLOBAL_INT_BIT2)));
2339		break;
2340
2341	default:
2342		break;
2343	}
2344}
2345
2346/*
2347 * New comm. interface: Send command functions
2348 */
2349static int
2350aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2351{
2352	struct aac_fib_xporthdr *pFibX;
2353	u_int32_t fibsize, high_addr;
2354	u_int64_t address;
2355
2356	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2357
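	/*
	 * With MSI-X enabled, pick a response vector for this FIB: walk
	 * vectors 1..n-1 round-robin and take the first one whose number of
	 * outstanding FIBs is below its per-vector cap, falling back to
	 * vector 0 if all of them are saturated.  AIF requests bypass the
	 * selection.  The chosen vector is encoded in the upper 16 bits of
	 * the FIB handle.
	 */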
2358	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2359		sc->aac_max_msix > 1) {
2360		u_int16_t vector_no, first_choice = 0xffff;
2361
2362		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2363		do {
2364			vector_no += 1;
2365			if (vector_no == sc->aac_max_msix)
2366				vector_no = 1;
2367			if (sc->aac_rrq_outstanding[vector_no] <
2368				sc->aac_vector_cap)
2369				break;
2370			if (0xffff == first_choice)
2371				first_choice = vector_no;
2372			else if (vector_no == first_choice)
2373				break;
2374		} while (1);
2375		if (vector_no == first_choice)
2376			vector_no = 0;
2377		sc->aac_rrq_outstanding[vector_no]++;
2378		if (sc->aac_fibs_pushed_no == 0xffffffff)
2379			sc->aac_fibs_pushed_no = 0;
2380		else
2381			sc->aac_fibs_pushed_no++;
2382
2383		cm->cm_fib->Header.Handle += (vector_no << 16);
2384	}
2385
2386	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
		/* Encode the FIB size in 128-byte units, minus one */
2388		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2389		/* Fill new FIB header */
2390		address = cm->cm_fibphys;
2391		high_addr = (u_int32_t)(address >> 32);
2392		if (high_addr == 0L) {
2393			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2394			cm->cm_fib->Header.u.TimeStamp = 0L;
2395		} else {
2396			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2397			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2398		}
2399		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2400	} else {
		/* Encode the FIB size, including the XPORT header, in 128-byte units, minus one */
2402		fibsize = (sizeof(struct aac_fib_xporthdr) +
2403		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2404		/* Fill XPORT header */
2405		pFibX = (struct aac_fib_xporthdr *)
2406			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2407		pFibX->Handle = cm->cm_fib->Header.Handle;
2408		pFibX->HostAddress = cm->cm_fibphys;
2409		pFibX->Size = cm->cm_fib->Header.Size;
2410		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2411		high_addr = (u_int32_t)(address >> 32);
2412	}
2413
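	/*
	 * The FIB size code is added into the low bits of the address written
	 * to the inbound queue register, so it is capped at 31 (5 bits).
	 */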
2414	if (fibsize > 31)
2415		fibsize = 31;
2416	aac_enqueue_busy(cm);
2417	if (high_addr) {
2418		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2419		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2420	} else {
2421		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2422	}
2423	return 0;
2424}
2425
2426/*
2427 * New comm. interface: get, set outbound queue index
2428 */
2429static int
2430aac_src_get_outb_queue(struct aac_softc *sc)
2431{
2432	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2433
2434	return(-1);
2435}
2436
2437static void
2438aac_src_set_outb_queue(struct aac_softc *sc, int index)
2439{
2440	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2441}
2442
2443/*
2444 * Debugging and Diagnostics
2445 */
2446
2447/*
2448 * Print some information about the controller.
2449 */
2450static void
2451aac_describe_controller(struct aac_softc *sc)
2452{
2453	struct aac_fib *fib;
2454	struct aac_adapter_info	*info;
2455	char *adapter_type = "Adaptec RAID controller";
2456
2457	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2458
2459	mtx_lock(&sc->aac_io_lock);
2460	aac_alloc_sync_fib(sc, &fib);
2461
2462	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2463		fib->data[0] = 0;
2464		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2465			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2466		else {
2467			struct aac_supplement_adapter_info *supp_info;
2468
2469			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2470			adapter_type = (char *)supp_info->AdapterTypeText;
2471			sc->aac_feature_bits = supp_info->FeatureBits;
2472			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2473		}
2474	}
2475	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2476		adapter_type,
2477		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2478		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2479
2480	fib->data[0] = 0;
2481	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2482		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2483		aac_release_sync_fib(sc);
2484		mtx_unlock(&sc->aac_io_lock);
2485		return;
2486	}
2487
2488	/* save the kernel revision structure for later use */
2489	info = (struct aac_adapter_info *)&fib->data[0];
2490	sc->aac_revision = info->KernelRevision;
2491
2492	if (bootverbose) {
2493		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2494		    "(%dMB cache, %dMB execution), %s\n",
2495		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2496		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2497		    info->BufferMem / (1024 * 1024),
2498		    info->ExecutionMem / (1024 * 1024),
2499		    aac_describe_code(aac_battery_platform,
2500		    info->batteryPlatform));
2501
2502		device_printf(sc->aac_dev,
2503		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2504		    info->KernelRevision.external.comp.major,
2505		    info->KernelRevision.external.comp.minor,
2506		    info->KernelRevision.external.comp.dash,
2507		    info->KernelRevision.buildNumber,
2508		    (u_int32_t)(info->SerialNumber & 0xffffff));
2509
2510		device_printf(sc->aac_dev, "Supported Options=%b\n",
2511			      sc->supported_options,
2512			      "\20"
2513			      "\1SNAPSHOT"
2514			      "\2CLUSTERS"
2515			      "\3WCACHE"
2516			      "\4DATA64"
2517			      "\5HOSTTIME"
2518			      "\6RAID50"
2519			      "\7WINDOW4GB"
2520			      "\10SCSIUPGD"
2521			      "\11SOFTERR"
2522			      "\12NORECOND"
2523			      "\13SGMAP64"
2524			      "\14ALARM"
2525			      "\15NONDASD"
2526			      "\16SCSIMGT"
2527			      "\17RAIDSCSI"
2528			      "\21ADPTINFO"
2529			      "\22NEWCOMM"
2530			      "\23ARRAY64BIT"
2531			      "\24HEATSENSOR");
2532	}
2533
2534	aac_release_sync_fib(sc);
2535	mtx_unlock(&sc->aac_io_lock);
2536}
2537
2538/*
2539 * Look up a text description of a numeric error code and return a pointer to
2540 * same.
2541 */
2542static char *
2543aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2544{
2545	int i;
2546
2547	for (i = 0; table[i].string != NULL; i++)
2548		if (table[i].code == code)
2549			return(table[i].string);
2550	return(table[i + 1].string);
2551}
2552
2553/*
2554 * Management Interface
2555 */
2556
2557static int
2558aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2559{
2560	struct aac_softc *sc;
2561
2562	sc = dev->si_drv1;
2563	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2564#if __FreeBSD_version >= 702000
2565	device_busy(sc->aac_dev);
2566	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2567#endif
2568	return 0;
2569}
2570
2571static int
2572aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2573{
2574	union aac_statrequest *as;
2575	struct aac_softc *sc;
2576	int error = 0;
2577
2578	as = (union aac_statrequest *)arg;
2579	sc = dev->si_drv1;
2580	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2581
2582	switch (cmd) {
2583	case AACIO_STATS:
2584		switch (as->as_item) {
2585		case AACQ_FREE:
2586		case AACQ_READY:
2587		case AACQ_BUSY:
2588			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2589			      sizeof(struct aac_qstat));
2590			break;
2591		default:
2592			error = ENOENT;
2593			break;
2594		}
2595	break;
2596
2597	case FSACTL_SENDFIB:
2598	case FSACTL_SEND_LARGE_FIB:
2599		arg = *(caddr_t*)arg;
2600	case FSACTL_LNX_SENDFIB:
2601	case FSACTL_LNX_SEND_LARGE_FIB:
2602		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2603		error = aac_ioctl_sendfib(sc, arg);
2604		break;
2605	case FSACTL_SEND_RAW_SRB:
2606		arg = *(caddr_t*)arg;
2607	case FSACTL_LNX_SEND_RAW_SRB:
2608		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2609		error = aac_ioctl_send_raw_srb(sc, arg);
2610		break;
2611	case FSACTL_AIF_THREAD:
2612	case FSACTL_LNX_AIF_THREAD:
2613		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2614		error = EINVAL;
2615		break;
2616	case FSACTL_OPEN_GET_ADAPTER_FIB:
2617		arg = *(caddr_t*)arg;
2618	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2619		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2620		error = aac_open_aif(sc, arg);
2621		break;
2622	case FSACTL_GET_NEXT_ADAPTER_FIB:
2623		arg = *(caddr_t*)arg;
2624	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2625		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2626		error = aac_getnext_aif(sc, arg);
2627		break;
2628	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2629		arg = *(caddr_t*)arg;
2630	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2631		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2632		error = aac_close_aif(sc, arg);
2633		break;
2634	case FSACTL_MINIPORT_REV_CHECK:
2635		arg = *(caddr_t*)arg;
2636	case FSACTL_LNX_MINIPORT_REV_CHECK:
2637		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2638		error = aac_rev_check(sc, arg);
2639		break;
2640	case FSACTL_QUERY_DISK:
2641		arg = *(caddr_t*)arg;
2642	case FSACTL_LNX_QUERY_DISK:
2643		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2644		error = aac_query_disk(sc, arg);
2645		break;
2646	case FSACTL_DELETE_DISK:
2647	case FSACTL_LNX_DELETE_DISK:
2648		/*
		 * We don't trust userland to tell us when to delete a
		 * container; instead we rely on an AIF coming from the
		 * controller.
2652		 */
2653		error = 0;
2654		break;
2655	case FSACTL_GET_PCI_INFO:
2656		arg = *(caddr_t*)arg;
2657	case FSACTL_LNX_GET_PCI_INFO:
2658		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2659		error = aac_get_pci_info(sc, arg);
2660		break;
2661	case FSACTL_GET_FEATURES:
2662		arg = *(caddr_t*)arg;
2663	case FSACTL_LNX_GET_FEATURES:
2664		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2665		error = aac_supported_features(sc, arg);
2666		break;
2667	default:
2668		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2669		error = EINVAL;
2670		break;
2671	}
2672	return(error);
2673}
2674
2675static int
2676aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2677{
2678	struct aac_softc *sc;
2679	struct aac_fib_context *ctx;
2680	int revents;
2681
2682	sc = dev->si_drv1;
2683	revents = 0;
2684
2685	mtx_lock(&sc->aac_io_lock);
2686	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2687		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2688			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2689				revents |= poll_events & (POLLIN | POLLRDNORM);
2690				break;
2691			}
2692		}
2693	}
2694	mtx_unlock(&sc->aac_io_lock);
2695
2696	if (revents == 0) {
2697		if (poll_events & (POLLIN | POLLRDNORM))
2698			selrecord(td, &sc->rcv_select);
2699	}
2700
2701	return (revents);
2702}
2703
2704static void
2705aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2706{
2707
2708	switch (event->ev_type) {
2709	case AAC_EVENT_CMFREE:
2710		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2711		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2712			aacraid_add_event(sc, event);
2713			return;
2714		}
2715		free(event, M_AACRAIDBUF);
2716		wakeup(arg);
2717		break;
2718	default:
2719		break;
2720	}
2721}
2722
2723/*
2724 * Send a FIB supplied from userspace
2725 */
2726static int
2727aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2728{
2729	struct aac_command *cm;
2730	int size, error;
2731
2732	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2733
2734	cm = NULL;
2735
2736	/*
2737	 * Get a command
2738	 */
2739	mtx_lock(&sc->aac_io_lock);
2740	if (aacraid_alloc_command(sc, &cm)) {
2741		struct aac_event *event;
2742
2743		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2744		    M_NOWAIT | M_ZERO);
2745		if (event == NULL) {
2746			error = EBUSY;
2747			mtx_unlock(&sc->aac_io_lock);
2748			goto out;
2749		}
2750		event->ev_type = AAC_EVENT_CMFREE;
2751		event->ev_callback = aac_ioctl_event;
2752		event->ev_arg = &cm;
2753		aacraid_add_event(sc, event);
2754		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2755	}
2756	mtx_unlock(&sc->aac_io_lock);
2757
2758	/*
2759	 * Fetch the FIB header, then re-copy to get data as well.
2760	 */
2761	if ((error = copyin(ufib, cm->cm_fib,
2762			    sizeof(struct aac_fib_header))) != 0)
2763		goto out;
2764	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2765	if (size > sc->aac_max_fib_size) {
2766		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2767			      size, sc->aac_max_fib_size);
2768		size = sc->aac_max_fib_size;
2769	}
2770	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2771		goto out;
2772	cm->cm_fib->Header.Size = size;
2773	cm->cm_timestamp = time_uptime;
2774	cm->cm_datalen = 0;
2775
2776	/*
2777	 * Pass the FIB to the controller, wait for it to complete.
2778	 */
2779	mtx_lock(&sc->aac_io_lock);
2780	error = aacraid_wait_command(cm);
2781	mtx_unlock(&sc->aac_io_lock);
2782	if (error != 0) {
2783		device_printf(sc->aac_dev,
			      "aacraid_wait_command returned %d\n", error);
2785		goto out;
2786	}
2787
2788	/*
2789	 * Copy the FIB and data back out to the caller.
2790	 */
2791	size = cm->cm_fib->Header.Size;
2792	if (size > sc->aac_max_fib_size) {
2793		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2794			      size, sc->aac_max_fib_size);
2795		size = sc->aac_max_fib_size;
2796	}
2797	error = copyout(cm->cm_fib, ufib, size);
2798
2799out:
2800	if (cm != NULL) {
2801		mtx_lock(&sc->aac_io_lock);
2802		aacraid_release_command(cm);
2803		mtx_unlock(&sc->aac_io_lock);
2804	}
2805	return(error);
2806}
2807
2808/*
2809 * Send a passthrough FIB supplied from userspace
2810 */
2811static int
2812aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2813{
2814	struct aac_command *cm;
2815	struct aac_fib *fib;
2816	struct aac_srb *srbcmd;
2817	struct aac_srb *user_srb = (struct aac_srb *)arg;
2818	void *user_reply;
2819	int error, transfer_data = 0;
2820	bus_dmamap_t orig_map = 0;
2821	u_int32_t fibsize = 0;
2822	u_int64_t srb_sg_address;
2823	u_int32_t srb_sg_bytecount;
2824
2825	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2826
2827	cm = NULL;
2828
2829	mtx_lock(&sc->aac_io_lock);
2830	if (aacraid_alloc_command(sc, &cm)) {
2831		struct aac_event *event;
2832
2833		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2834		    M_NOWAIT | M_ZERO);
2835		if (event == NULL) {
2836			error = EBUSY;
2837			mtx_unlock(&sc->aac_io_lock);
2838			goto out;
2839		}
2840		event->ev_type = AAC_EVENT_CMFREE;
2841		event->ev_callback = aac_ioctl_event;
2842		event->ev_arg = &cm;
2843		aacraid_add_event(sc, event);
2844		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2845	}
2846	mtx_unlock(&sc->aac_io_lock);
2847
2848	cm->cm_data = NULL;
2849	/* save original dma map */
2850	orig_map = cm->cm_datamap;
2851
2852	fib = cm->cm_fib;
2853	srbcmd = (struct aac_srb *)fib->data;
	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
		sizeof (u_int32_t))) != 0)
2856		goto out;
2857	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2858		error = EINVAL;
2859		goto out;
2860	}
	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2862		goto out;
2863
2864	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2865	srbcmd->retry_limit = 0;	/* obsolete */
2866
2867	/* only one sg element from userspace supported */
2868	if (srbcmd->sg_map.SgCount > 1) {
2869		error = EINVAL;
2870		goto out;
2871	}
2872	/* check fibsize */
2873	if (fibsize == (sizeof(struct aac_srb) +
2874		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2875		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2876		srb_sg_bytecount = sgp->SgByteCount;
2877		srb_sg_address = (u_int64_t)sgp->SgAddress;
2878	} else if (fibsize == (sizeof(struct aac_srb) +
2879		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2880#ifdef __LP64__
2881		struct aac_sg_entry64 *sgp =
2882			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2883		srb_sg_bytecount = sgp->SgByteCount;
2884		srb_sg_address = sgp->SgAddress;
2885		if (srb_sg_address > 0xffffffffull &&
2886			!(sc->flags & AAC_FLAGS_SG_64BIT))
2887#endif
2888		{
2889			error = EINVAL;
2890			goto out;
2891		}
2892	} else {
2893		error = EINVAL;
2894		goto out;
2895	}
2896	user_reply = (char *)arg + fibsize;
2897	srbcmd->data_len = srb_sg_bytecount;
2898	if (srbcmd->sg_map.SgCount == 1)
2899		transfer_data = 1;
2900
2901	if (transfer_data) {
2902		/*
		 * Create a DMA tag for the passthrough data buffer and allocate it.
2904		 */
2905		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2906			1, 0,			/* algnmnt, boundary */
2907			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2908			BUS_SPACE_MAXADDR_32BIT :
2909			0x7fffffff,		/* lowaddr */
2910			BUS_SPACE_MAXADDR, 	/* highaddr */
2911			NULL, NULL, 		/* filter, filterarg */
2912			srb_sg_bytecount, 	/* size */
2913			sc->aac_sg_tablesize,	/* nsegments */
2914			srb_sg_bytecount, 	/* maxsegsize */
2915			0,			/* flags */
2916			NULL, NULL,		/* No locking needed */
2917			&cm->cm_passthr_dmat)) {
2918			error = ENOMEM;
2919			goto out;
2920		}
2921		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2922			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2923			error = ENOMEM;
2924			goto out;
2925		}
2926		/* fill some cm variables */
2927		cm->cm_datalen = srb_sg_bytecount;
2928		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2929			cm->cm_flags |= AAC_CMD_DATAIN;
2930		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2931			cm->cm_flags |= AAC_CMD_DATAOUT;
2932
2933		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2934			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2935				cm->cm_data, cm->cm_datalen)) != 0)
2936				goto out;
			/* is a sync required for memory from bus_dmamem_alloc()? */
2938			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2939				BUS_DMASYNC_PREWRITE);
2940		}
2941	}
2942
2943	/* build the FIB */
2944	fib->Header.Size = sizeof(struct aac_fib_header) +
2945		sizeof(struct aac_srb);
2946	fib->Header.XferState =
2947		AAC_FIBSTATE_HOSTOWNED   |
2948		AAC_FIBSTATE_INITIALISED |
2949		AAC_FIBSTATE_EMPTY	 |
2950		AAC_FIBSTATE_FROMHOST	 |
2951		AAC_FIBSTATE_REXPECTED   |
2952		AAC_FIBSTATE_NORM	 |
2953		AAC_FIBSTATE_ASYNC;
2954
2955	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2956		ScsiPortCommandU64 : ScsiPortCommand;
2957	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2958
2959	/* send command */
2960	if (transfer_data) {
2961		bus_dmamap_load(cm->cm_passthr_dmat,
2962			cm->cm_datamap, cm->cm_data,
2963			cm->cm_datalen,
2964			aacraid_map_command_sg, cm, 0);
2965	} else {
2966		aacraid_map_command_sg(cm, NULL, 0, 0);
2967	}
2968
2969	/* wait for completion */
2970	mtx_lock(&sc->aac_io_lock);
2971	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2972		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2973	mtx_unlock(&sc->aac_io_lock);
2974
2975	/* copy data */
2976	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2977		if ((error = copyout(cm->cm_data,
2978			(void *)(uintptr_t)srb_sg_address,
2979			cm->cm_datalen)) != 0)
2980			goto out;
		/* is a sync required for memory from bus_dmamem_alloc()? */
2982		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2983				BUS_DMASYNC_POSTREAD);
2984	}
2985
2986	/* status */
2987	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
2988
2989out:
2990	if (cm && cm->cm_data) {
2991		if (transfer_data)
2992			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
2993		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
2994		cm->cm_datamap = orig_map;
2995	}
2996	if (cm && cm->cm_passthr_dmat)
2997		bus_dma_tag_destroy(cm->cm_passthr_dmat);
2998	if (cm) {
2999		mtx_lock(&sc->aac_io_lock);
3000		aacraid_release_command(cm);
3001		mtx_unlock(&sc->aac_io_lock);
3002	}
3003	return(error);
3004}
3005
3006/*
3007 * Request an AIF from the controller (new comm. type1)
3008 */
3009static void
3010aac_request_aif(struct aac_softc *sc)
3011{
3012	struct aac_command *cm;
3013	struct aac_fib *fib;
3014
3015	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3016
3017	if (aacraid_alloc_command(sc, &cm)) {
3018		sc->aif_pending = 1;
3019		return;
3020	}
3021	sc->aif_pending = 0;
3022
3023	/* build the FIB */
3024	fib = cm->cm_fib;
3025	fib->Header.Size = sizeof(struct aac_fib);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC;
3034	/* set AIF marker */
3035	fib->Header.Handle = 0x00800000;
3036	fib->Header.Command = AifRequest;
3037	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3038
3039	aacraid_map_command_sg(cm, NULL, 0, 0);
3040}
3041
3042
3043#if __FreeBSD_version >= 702000
3044/*
3045 * cdevpriv interface private destructor.
3046 */
3047static void
3048aac_cdevpriv_dtor(void *arg)
3049{
3050	struct aac_softc *sc;
3051
3052	sc = arg;
3053	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3054	mtx_lock(&Giant);
3055	device_unbusy(sc->aac_dev);
3056	mtx_unlock(&Giant);
3057}
3058#else
3059static int
3060aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3061{
3062	struct aac_softc *sc;
3063
3064	sc = dev->si_drv1;
3065	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3066	return 0;
3067}
3068#endif
3069
3070/*
3071 * Handle an AIF sent to us by the controller; queue it for later reference.
3072 * If the queue fills up, then drop the older entries.
3073 */
3074static void
3075aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3076{
3077	struct aac_aif_command *aif;
3078	struct aac_container *co, *co_next;
3079	struct aac_fib_context *ctx;
3080	struct aac_fib *sync_fib;
3081	struct aac_mntinforesp mir;
3082	int next, current, found;
3083	int count = 0, changed = 0, i = 0;
3084	u_int32_t channel, uid;
3085
3086	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3087
3088	aif = (struct aac_aif_command*)&fib->data[0];
3089	aacraid_print_aif(sc, aif);
3090
3091	/* Is it an event that we should care about? */
3092	switch (aif->command) {
3093	case AifCmdEventNotify:
3094		switch (aif->data.EN.type) {
3095		case AifEnAddContainer:
3096		case AifEnDeleteContainer:
3097			/*
3098			 * A container was added or deleted, but the message
3099			 * doesn't tell us anything else!  Re-enumerate the
3100			 * containers and sort things out.
3101			 */
3102			aac_alloc_sync_fib(sc, &sync_fib);
3103			do {
3104				/*
3105				 * Ask the controller for its containers one at
3106				 * a time.
3107				 * XXX What if the controller's list changes
				 * midway through this enumeration?
3109				 * XXX This should be done async.
3110				 */
3111				if (aac_get_container_info(sc, sync_fib, i,
3112					&mir, &uid) != 0)
3113					continue;
3114				if (i == 0)
3115					count = mir.MntRespCount;
3116				/*
3117				 * Check the container against our list.
3118				 * co->co_found was already set to 0 in a
3119				 * previous run.
3120				 */
3121				if ((mir.Status == ST_OK) &&
3122				    (mir.MntTable[0].VolType != CT_NONE)) {
3123					found = 0;
3124					TAILQ_FOREACH(co,
3125						      &sc->aac_container_tqh,
3126						      co_link) {
3127						if (co->co_mntobj.ObjectId ==
3128						    mir.MntTable[0].ObjectId) {
3129							co->co_found = 1;
3130							found = 1;
3131							break;
3132						}
3133					}
3134					/*
3135					 * If the container matched, continue
3136					 * in the list.
3137					 */
3138					if (found) {
3139						i++;
3140						continue;
3141					}
3142
3143					/*
3144					 * This is a new container.  Do all the
3145					 * appropriate things to set it up.
3146					 */
3147					aac_add_container(sc, &mir, 1, uid);
3148					changed = 1;
3149				}
3150				i++;
3151			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3152			aac_release_sync_fib(sc);
3153
3154			/*
3155			 * Go through our list of containers and see which ones
3156			 * were not marked 'found'.  Since the controller didn't
3157			 * list them they must have been deleted.  Do the
3158			 * appropriate steps to destroy the device.  Also reset
3159			 * the co->co_found field.
3160			 */
3161			co = TAILQ_FIRST(&sc->aac_container_tqh);
3162			while (co != NULL) {
3163				if (co->co_found == 0) {
3164					co_next = TAILQ_NEXT(co, co_link);
3165					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3166						     co_link);
3167					free(co, M_AACRAIDBUF);
3168					changed = 1;
3169					co = co_next;
3170				} else {
3171					co->co_found = 0;
3172					co = TAILQ_NEXT(co, co_link);
3173				}
3174			}
3175
3176			/* Attach the newly created containers */
3177			if (changed) {
3178				if (sc->cam_rescan_cb != NULL)
3179					sc->cam_rescan_cb(sc, 0,
3180				    	AAC_CAM_TARGET_WILDCARD);
3181			}
3182
3183			break;
3184
3185		case AifEnEnclosureManagement:
3186			switch (aif->data.EN.data.EEE.eventType) {
3187			case AIF_EM_DRIVE_INSERTION:
3188			case AIF_EM_DRIVE_REMOVAL:
3189				channel = aif->data.EN.data.EEE.unitID;
3190				if (sc->cam_rescan_cb != NULL)
3191					sc->cam_rescan_cb(sc,
3192					    ((channel>>24) & 0xF) + 1,
3193					    (channel & 0xFFFF));
3194				break;
3195			}
3196			break;
3197
3198		case AifEnAddJBOD:
3199		case AifEnDeleteJBOD:
3200		case AifRawDeviceRemove:
3201			channel = aif->data.EN.data.ECE.container;
3202			if (sc->cam_rescan_cb != NULL)
3203				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3204				    AAC_CAM_TARGET_WILDCARD);
3205			break;
3206
3207		default:
3208			break;
3209		}
3210
3211	default:
3212		break;
3213	}
3214
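	/*
	 * The AIF queue is a circular buffer of AAC_AIFQ_LENGTH FIBs.  Once
	 * it has wrapped, a reader context that is about to be overtaken by
	 * the write index is flagged as wrapped and then pushed forward, so
	 * that it always points at the oldest AIF still present.
	 */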
3215	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3216	current = sc->aifq_idx;
3217	next = (current + 1) % AAC_AIFQ_LENGTH;
3218	if (next == 0)
3219		sc->aifq_filled = 1;
3220	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3221	/* modify AIF contexts */
3222	if (sc->aifq_filled) {
3223		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3224			if (next == ctx->ctx_idx)
3225				ctx->ctx_wrap = 1;
3226			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3227				ctx->ctx_idx = next;
3228		}
3229	}
3230	sc->aifq_idx = next;
3231	/* On the off chance that someone is sleeping for an aif... */
3232	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3233		wakeup(sc->aac_aifq);
3234	/* Wakeup any poll()ers */
3235	selwakeuppri(&sc->rcv_select, PRIBIO);
3236
3237	return;
3238}
3239
3240/*
3241 * Return the Revision of the driver to userspace and check to see if the
3242 * userspace app is possibly compatible.  This is extremely bogus since
3243 * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3244 * returning what the card reported.
3245 */
3246static int
3247aac_rev_check(struct aac_softc *sc, caddr_t udata)
3248{
3249	struct aac_rev_check rev_check;
3250	struct aac_rev_check_resp rev_check_resp;
3251	int error = 0;
3252
3253	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3254
3255	/*
3256	 * Copyin the revision struct from userspace
3257	 */
3258	if ((error = copyin(udata, (caddr_t)&rev_check,
3259			sizeof(struct aac_rev_check))) != 0) {
3260		return error;
3261	}
3262
3263	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3264	      rev_check.callingRevision.buildNumber);
3265
3266	/*
3267	 * Doctor up the response struct.
3268	 */
3269	rev_check_resp.possiblyCompatible = 1;
3270	rev_check_resp.adapterSWRevision.external.comp.major =
3271	    AAC_DRIVER_MAJOR_VERSION;
3272	rev_check_resp.adapterSWRevision.external.comp.minor =
3273	    AAC_DRIVER_MINOR_VERSION;
3274	rev_check_resp.adapterSWRevision.external.comp.type =
3275	    AAC_DRIVER_TYPE;
3276	rev_check_resp.adapterSWRevision.external.comp.dash =
3277	    AAC_DRIVER_BUGFIX_LEVEL;
3278	rev_check_resp.adapterSWRevision.buildNumber =
3279	    AAC_DRIVER_BUILD;
3280
3281	return(copyout((caddr_t)&rev_check_resp, udata,
3282			sizeof(struct aac_rev_check_resp)));
3283}
3284
3285/*
3286 * Pass the fib context to the caller
3287 */
3288static int
3289aac_open_aif(struct aac_softc *sc, caddr_t arg)
3290{
3291	struct aac_fib_context *fibctx, *ctx;
3292	int error = 0;
3293
3294	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3295
3296	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3297	if (fibctx == NULL)
3298		return (ENOMEM);
3299
3300	mtx_lock(&sc->aac_io_lock);
3301	/* all elements are already 0, add to queue */
3302	if (sc->fibctx == NULL)
3303		sc->fibctx = fibctx;
3304	else {
3305		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3306			;
3307		ctx->next = fibctx;
3308		fibctx->prev = ctx;
3309	}
3310
	/*
	 * Derive a unique handle from the (truncated) kernel address of the
	 * new context and bump it until it does not collide with any
	 * existing context.
	 */
3312	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3313	ctx = sc->fibctx;
3314	while (ctx != fibctx) {
3315		if (ctx->unique == fibctx->unique) {
3316			fibctx->unique++;
3317			ctx = sc->fibctx;
3318		} else {
3319			ctx = ctx->next;
3320		}
3321	}
3322
3323	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3324	mtx_unlock(&sc->aac_io_lock);
3325	if (error)
3326		aac_close_aif(sc, (caddr_t)ctx);
3327	return error;
3328}
3329
3330/*
3331 * Close the caller's fib context
3332 */
3333static int
3334aac_close_aif(struct aac_softc *sc, caddr_t arg)
3335{
3336	struct aac_fib_context *ctx;
3337
3338	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3339
3340	mtx_lock(&sc->aac_io_lock);
3341	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3342		if (ctx->unique == *(uint32_t *)&arg) {
3343			if (ctx == sc->fibctx)
3344				sc->fibctx = NULL;
3345			else {
3346				ctx->prev->next = ctx->next;
3347				if (ctx->next)
3348					ctx->next->prev = ctx->prev;
3349			}
3350			break;
3351		}
3352	}
3353	if (ctx)
3354		free(ctx, M_AACRAIDBUF);
3355
3356	mtx_unlock(&sc->aac_io_lock);
3357	return 0;
3358}
3359
3360/*
3361 * Pass the caller the next AIF in their queue
3362 */
3363static int
3364aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3365{
3366	struct get_adapter_fib_ioctl agf;
3367	struct aac_fib_context *ctx;
3368	int error;
3369
3370	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3371
3372	mtx_lock(&sc->aac_io_lock);
3373	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
3374		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3375			if (agf.AdapterFibContext == ctx->unique)
3376				break;
3377		}
3378		if (!ctx) {
3379			mtx_unlock(&sc->aac_io_lock);
3380			return (EFAULT);
3381		}
3382
3383		error = aac_return_aif(sc, ctx, agf.AifFib);
3384		if (error == EAGAIN && agf.Wait) {
3385			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3386			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3387			while (error == EAGAIN) {
3388				mtx_unlock(&sc->aac_io_lock);
3389				error = tsleep(sc->aac_aifq, PRIBIO |
3390					       PCATCH, "aacaif", 0);
3391				mtx_lock(&sc->aac_io_lock);
3392				if (error == 0)
3393					error = aac_return_aif(sc, ctx, agf.AifFib);
3394			}
3395			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3396		}
3397	}
3398	mtx_unlock(&sc->aac_io_lock);
3399	return(error);
3400}
3401
3402/*
3403 * Hand the next AIF off the top of the queue out to userspace.
3404 */
3405static int
3406aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3407{
3408	int current, error;
3409
3410	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3411
3412	current = ctx->ctx_idx;
3413	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3414		/* empty */
3415		return (EAGAIN);
3416	}
	error = copyout(&sc->aac_aifq[current], (void *)uptr,
	    sizeof(struct aac_fib));
3419	if (error)
3420		device_printf(sc->aac_dev,
3421		    "aac_return_aif: copyout returned %d\n", error);
3422	else {
3423		ctx->ctx_wrap = 0;
3424		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3425	}
3426	return(error);
3427}
3428
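/*
 * Report the controller's PCI bus and slot numbers to the management tools.
 */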
3429static int
3430aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3431{
3432	struct aac_pci_info {
3433		u_int32_t bus;
3434		u_int32_t slot;
3435	} pciinf;
3436	int error;
3437
3438	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3439
3440	pciinf.bus = pci_get_bus(sc->aac_dev);
3441	pciinf.slot = pci_get_slot(sc->aac_dev);
3442
3443	error = copyout((caddr_t)&pciinf, uptr,
3444			sizeof(struct aac_pci_info));
3445
3446	return (error);
3447}
3448
3449static int
3450aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3451{
3452	struct aac_features f;
3453	int error;
3454
3455	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3456
3457	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3458		return (error);
3459
3460	/*
3461	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3462	 * ALL zero in the featuresState, the driver will return the current
3463	 * state of all the supported features, the data field will not be
3464	 * valid.
3465	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3466	 * a specific bit set in the featuresState, the driver will return the
3467	 * current state of this specific feature and whatever data that are
3468	 * associated with the feature in the data field or perform whatever
3469	 * action needed indicates in the data field.
3470	 */
3471	 if (f.feat.fValue == 0) {
3472		f.feat.fBits.largeLBA =
3473		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3474		f.feat.fBits.JBODSupport = 1;
3475		/* TODO: In the future, add other features state here as well */
3476	} else {
3477		if (f.feat.fBits.largeLBA)
3478			f.feat.fBits.largeLBA =
3479			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3480		/* TODO: Add other features state and data in the future */
3481	}
3482
3483	error = copyout(&f, uptr, sizeof (f));
3484	return (error);
3485}
3486
3487/*
3488 * Give the userland some information about the container.  The AAC arch
3489 * expects the driver to be a SCSI passthrough type driver, so it expects
3490 * the containers to have b:t:l numbers.  Fake it.
3491 */
3492static int
3493aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3494{
3495	struct aac_query_disk query_disk;
3496	struct aac_container *co;
3497	int error, id;
3498
3499	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3500
3501	mtx_lock(&sc->aac_io_lock);
3502	error = copyin(uptr, (caddr_t)&query_disk,
3503		       sizeof(struct aac_query_disk));
3504	if (error) {
3505		mtx_unlock(&sc->aac_io_lock);
3506		return (error);
3507	}
3508
3509	id = query_disk.ContainerNumber;
3510	if (id == -1) {
3511		mtx_unlock(&sc->aac_io_lock);
3512		return (EINVAL);
3513	}
3514
3515	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3516		if (co->co_mntobj.ObjectId == id)
3517			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;		/* XXX is this right? */
3524	} else {
3525		query_disk.Valid = 1;
3526		query_disk.Locked = 1;
3527		query_disk.Deleted = 0;
3528		query_disk.Bus = device_get_unit(sc->aac_dev);
3529		query_disk.Target = 0;
3530		query_disk.Lun = 0;
3531		query_disk.UnMapped = 0;
3532	}
3533
3534	error = copyout((caddr_t)&query_disk, uptr,
3535			sizeof(struct aac_query_disk));
3536
3537	mtx_unlock(&sc->aac_io_lock);
3538	return (error);
3539}
3540
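/*
 * Create the pseudo "container bus" child device that the CAM module uses to
 * expose the logical containers.
 */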
3541static void
3542aac_container_bus(struct aac_softc *sc)
3543{
3544	struct aac_sim *sim;
3545	device_t child;
3546
3547	sim =(struct aac_sim *)malloc(sizeof(struct aac_sim),
3548		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
	if (sim == NULL) {
		device_printf(sc->aac_dev,
		    "No memory to add container bus\n");
		panic("Out of memory?!");
	}
3554	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3555	if (child == NULL) {
		device_printf(sc->aac_dev,
		    "device_add_child failed for container bus\n");
3558		free(sim, M_AACRAIDBUF);
3559		panic("Out of memory?!");
3560	}
3561
3562	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3563	sim->BusNumber = 0;
3564	sim->BusType = CONTAINER_BUS;
3565	sim->InitiatorBusId = -1;
3566	sim->aac_sc = sc;
3567	sim->sim_dev = child;
3568	sim->aac_cam = NULL;
3569
3570	device_set_ivars(child, sim);
3571	device_set_desc(child, "Container Bus");
3572	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3573	/*
3574	device_set_desc(child, aac_describe_code(aac_container_types,
3575			mir->MntTable[0].VolType));
3576	*/
3577	bus_generic_attach(sc->aac_dev);
3578}
3579
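/*
 * Query the controller for its physical SCSI bus configuration and create a
 * passthrough bus child device for each valid bus reported.
 */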
3580static void
3581aac_get_bus_info(struct aac_softc *sc)
3582{
3583	struct aac_fib *fib;
3584	struct aac_ctcfg *c_cmd;
3585	struct aac_ctcfg_resp *c_resp;
3586	struct aac_vmioctl *vmi;
3587	struct aac_vmi_businf_resp *vmi_resp;
3588	struct aac_getbusinf businfo;
3589	struct aac_sim *caminf;
3590	device_t child;
3591	int i, error;
3592
3593	mtx_lock(&sc->aac_io_lock);
3594	aac_alloc_sync_fib(sc, &fib);
3595	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3596	bzero(c_cmd, sizeof(struct aac_ctcfg));
3597
3598	c_cmd->Command = VM_ContainerConfig;
3599	c_cmd->cmd = CT_GET_SCSI_METHOD;
3600	c_cmd->param = 0;
3601
3602	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3603	    sizeof(struct aac_ctcfg));
3604	if (error) {
3605		device_printf(sc->aac_dev, "Error %d sending "
3606		    "VM_ContainerConfig command\n", error);
3607		aac_release_sync_fib(sc);
3608		mtx_unlock(&sc->aac_io_lock);
3609		return;
3610	}
3611
3612	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3613	if (c_resp->Status != ST_OK) {
3614		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3615		    c_resp->Status);
3616		aac_release_sync_fib(sc);
3617		mtx_unlock(&sc->aac_io_lock);
3618		return;
3619	}
3620
3621	sc->scsi_method_id = c_resp->param;
3622
3623	vmi = (struct aac_vmioctl *)&fib->data[0];
3624	bzero(vmi, sizeof(struct aac_vmioctl));
3625
3626	vmi->Command = VM_Ioctl;
3627	vmi->ObjType = FT_DRIVE;
3628	vmi->MethId = sc->scsi_method_id;
3629	vmi->ObjId = 0;
3630	vmi->IoctlCmd = GetBusInfo;
3631
3632	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3633	    sizeof(struct aac_vmi_businf_resp));
3634	if (error) {
3635		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3636		    error);
3637		aac_release_sync_fib(sc);
3638		mtx_unlock(&sc->aac_io_lock);
3639		return;
3640	}
3641
3642	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3643	if (vmi_resp->Status != ST_OK) {
3644		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3645		    vmi_resp->Status);
3646		aac_release_sync_fib(sc);
3647		mtx_unlock(&sc->aac_io_lock);
3648		return;
3649	}
3650
3651	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3652	aac_release_sync_fib(sc);
3653	mtx_unlock(&sc->aac_io_lock);
3654
3655	for (i = 0; i < businfo.BusCount; i++) {
3656		if (businfo.BusValid[i] != AAC_BUS_VALID)
3657			continue;
3658
		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3660		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3661		if (caminf == NULL) {
3662			device_printf(sc->aac_dev,
3663			    "No memory to add passthrough bus %d\n", i);
3664			break;
		}
3666
3667		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3668		if (child == NULL) {
3669			device_printf(sc->aac_dev,
3670			    "device_add_child failed for passthrough bus %d\n",
3671			    i);
3672			free(caminf, M_AACRAIDBUF);
3673			break;
3674		}
3675
3676		caminf->TargetsPerBus = businfo.TargetsPerBus;
3677		caminf->BusNumber = i+1;
3678		caminf->BusType = PASSTHROUGH_BUS;
3679		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
3680		caminf->aac_sc = sc;
3681		caminf->sim_dev = child;
3682		caminf->aac_cam = NULL;
3683
3684		device_set_ivars(child, caminf);
3685		device_set_desc(child, "SCSI Passthrough Bus");
3686		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3687	}
3688}
3689
3690/*
3691 * Check to see if the kernel is up and running. If we are in a
3692 * BlinkLED state, return the BlinkLED code.
3693 */
3694static u_int32_t
3695aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3696{
3697	u_int32_t ret;
3698
	ret = AAC_GET_FWSTATUS(sc);

	if (ret & AAC_UP_AND_RUNNING)
		ret = 0;
	else if ((ret & AAC_KERNEL_PANIC) && bled)
		*bled = (ret >> 16) & 0xff;

	return (ret);
}

/*
 * Perform an IOP reset.  After the reset the card basically has to be
 * re-initialized as if it were coming up from a cold boot, and the
 * driver is responsible for any I/O that was outstanding to the
 * adapter at the time of the reset.  The init code is kept modular so
 * that it can be called from multiple places.
 */
static int
aac_reset_adapter(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_pause_command *pc;
	u_int32_t status, reset_mask, waitCount, max_msix_orig;
	int msi_enabled_orig;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (sc->aac_state & AAC_STATE_RESET) {
		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
		return (EINVAL);
	}
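	/*
	 * Mark the reset as in progress; the flag is cleared again at the
	 * "finish" label once the adapter has been brought back up or the
	 * reset attempt has been abandoned.
	 */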
	sc->aac_state |= AAC_STATE_RESET;

	/* disable interrupt */
	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);

	/*
	 * Abort all pending commands:
	 * a) on the controller
	 */
	while ((cm = aac_dequeue_busy(sc)) != NULL) {
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this
			 * command
			 */
			wakeup(cm);
		}
	}

	/* b) in the waiting queues */
	while ((cm = aac_dequeue_ready(sc)) != NULL) {
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this
			 * command
			 */
			wakeup(cm);
		}
	}

	/* flush drives */
	if (aac_check_adapter_health(sc, NULL) == 0) {
		mtx_unlock(&sc->aac_io_lock);
		(void) aacraid_shutdown(sc->aac_dev);
		mtx_lock(&sc->aac_io_lock);
	}

	/*
	 * Execute the IOP reset.  Use a direct core reset through the
	 * IRCSR register when the firmware supports MU reset; otherwise
	 * issue IOP_RESET_ALWAYS and, if that succeeds and doorbell reset
	 * is supported, write the returned reset mask to the doorbell.
	 * Older firmware that rejects IOP_RESET_ALWAYS gets the plain
	 * IOP_RESET command instead.
	 */
	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);

		/* We need to wait for 5 seconds before accessing the MU again,
		 * 10000 * 100us = 1,000,000us = 1000ms = 1s
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);			/* delay 100 microseconds */
			waitCount--;
		}
	} else if ((aacraid_sync_command(sc,
		AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
		/* call IOP_RESET for older firmware */
		if ((aacraid_sync_command(sc,
			AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {

			if (status == AAC_SRB_STS_INVALID_REQUEST)
				device_printf(sc->aac_dev, "IOP_RESET not supported\n");
			else
				/* probably timeout */
				device_printf(sc->aac_dev, "IOP_RESET failed\n");

			/* unwind aac_shutdown() */
			aac_alloc_sync_fib(sc, &fib);
			pc = (struct aac_pause_command *)&fib->data[0];
			pc->Command = VM_ContainerConfig;
			pc->Type = CT_PAUSE_IO;
			pc->Timeout = 1;
			pc->Min = 1;
			pc->NoRescan = 1;

			(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
				sizeof (struct aac_pause_command));
			aac_release_sync_fib(sc);

			goto finish;
		}
	} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
		/*
		 * We need to wait for 5 seconds before accessing the doorbell
		 * again, 10000 * 100us = 1,000,000us = 1000ms = 1s
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);		/* delay 100 microseconds */
			waitCount--;
		}
	}

	/*
	 * Initialize the adapter.
	 */
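	/*
	 * aac_check_firmware() is re-run with MSI temporarily disabled;
	 * the saved MSI-X settings are then restored before aac_init()
	 * brings the adapter back up as if from a cold boot.
	 */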
	max_msix_orig = sc->aac_max_msix;
	msi_enabled_orig = sc->msi_enabled;
	sc->msi_enabled = FALSE;
	if (aac_check_firmware(sc) != 0)
		goto finish;
	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
		sc->aac_max_msix = max_msix_orig;
		if (msi_enabled_orig) {
			sc->msi_enabled = msi_enabled_orig;
			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
		}
		mtx_unlock(&sc->aac_io_lock);
		aac_init(sc);
		mtx_lock(&sc->aac_io_lock);
	}

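	/*
	 * Whether or not the reset succeeded, clear the reset flag,
	 * re-enable interrupts and restart any queued I/O.
	 */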
finish:
	sc->aac_state &= ~AAC_STATE_RESET;
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
	aacraid_startio(sc);
	return (0);
}
