mfi.c revision 165852
1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 165852 2007-01-07 06:43:25Z scottl $");
29
30#include "opt_mfi.h"
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/sysctl.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/poll.h>
38#include <sys/selinfo.h>
39#include <sys/bus.h>
40#include <sys/conf.h>
41#include <sys/eventhandler.h>
42#include <sys/rman.h>
43#include <sys/bus_dma.h>
44#include <sys/bio.h>
45#include <sys/ioccom.h>
46#include <sys/uio.h>
47#include <sys/proc.h>
48#include <sys/signalvar.h>
49
50#include <machine/bus.h>
51#include <machine/resource.h>
52
53#include <dev/mfi/mfireg.h>
54#include <dev/mfi/mfi_ioctl.h>
55#include <dev/mfi/mfivar.h>
56
57static int	mfi_alloc_commands(struct mfi_softc *);
58static void	mfi_release_command(struct mfi_command *cm);
59static int	mfi_comms_init(struct mfi_softc *);
60static int	mfi_wait_command(struct mfi_softc *, struct mfi_command *);
61static int	mfi_get_controller_info(struct mfi_softc *);
62static int	mfi_get_log_state(struct mfi_softc *,
63		    struct mfi_evt_log_state **);
64static int	mfi_get_entry(struct mfi_softc *, int);
65static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
66		    uint32_t, void **, size_t);
67static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
68static void	mfi_startup(void *arg);
69static void	mfi_intr(void *arg);
70static void	mfi_enable_intr(struct mfi_softc *sc);
71static void	mfi_ldprobe(struct mfi_softc *sc);
72static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
73static void	mfi_aen_complete(struct mfi_command *);
74static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
75static int	mfi_add_ld(struct mfi_softc *sc, int);
76static void	mfi_add_ld_complete(struct mfi_command *);
77static struct mfi_command * mfi_bio_command(struct mfi_softc *);
78static void	mfi_bio_complete(struct mfi_command *);
79static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
80static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
81static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
82static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
83static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
84static void	mfi_timeout(void *);
85
86
87SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
88static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
89TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
90SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
91            0, "event message locale");
92
93static int	mfi_event_class = MFI_EVT_CLASS_INFO;
94TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
95SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
96          0, "event message class");
97
98/* Management interface */
99static d_open_t		mfi_open;
100static d_close_t	mfi_close;
101static d_ioctl_t	mfi_ioctl;
102static d_poll_t		mfi_poll;
103
104static struct cdevsw mfi_cdevsw = {
105	.d_version = 	D_VERSION,
106	.d_flags =	0,
107	.d_open = 	mfi_open,
108	.d_close =	mfi_close,
109	.d_ioctl =	mfi_ioctl,
110	.d_poll =	mfi_poll,
111	.d_name =	"mfi",
112};
113
114MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
115
116#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
117
static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t fw_state, cur_state;
	int max_wait, i;

	/*
	 * Poll the outbound message register until the firmware reports
	 * READY, nudging it out of any intermediate state along the way.
	 * Each state gets its own worst-case settle time (max_wait, in
	 * seconds; the poll loop below checks ten times per second).
	 * Returns 0 once READY, or ENXIO on fault/unknown/stuck states.
	 */
	fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			/* Unrecoverable; give up immediately. */
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* Firmware wants us to acknowledge the handshake. */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask an already-running firmware to go to READY. */
			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = 10;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_DEVICE_SCAN:
		case MFI_FWSTATE_FLUSH_CACHE:
			/* These phases can legitimately take a while. */
			max_wait = 20;
			break;
		default:
			device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
			    fw_state);
			return (ENXIO);
		}
		/* Re-read the state every 100ms, up to max_wait seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			/* No transition within the allotted time. */
			device_printf(sc->mfi_dev, "firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
171
172static void
173mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
174{
175	uint32_t *addr;
176
177	addr = arg;
178	*addr = segs[0].ds_addr;
179}
180
181int
182mfi_attach(struct mfi_softc *sc)
183{
184	uint32_t status;
185	int error, commsz, framessz, sensesz;
186	int frames, unit, max_fw_sge;
187
188	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
189	TAILQ_INIT(&sc->mfi_ld_tqh);
190	TAILQ_INIT(&sc->mfi_aen_pids);
191
192	mfi_initq_free(sc);
193	mfi_initq_ready(sc);
194	mfi_initq_busy(sc);
195	mfi_initq_bio(sc);
196
197	/* Before we get too far, see if the firmware is working */
198	if ((error = mfi_transition_firmware(sc)) != 0) {
199		device_printf(sc->mfi_dev, "Firmware not in READY state, "
200		    "error %d\n", error);
201		return (ENXIO);
202	}
203
204	/*
205	 * Get information needed for sizing the contiguous memory for the
206	 * frame pool.  Size down the sgl parameter since we know that
207	 * we will never need more than what's required for MAXPHYS.
208	 * It would be nice if these constants were available at runtime
209	 * instead of compile time.
210	 */
211	status = MFI_READ4(sc, MFI_OMSG0);
212	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
213	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
214	sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
215
216	/*
217	 * Create the dma tag for data buffers.  Used both for block I/O
218	 * and for various internal data queries.
219	 */
220	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
221				1, 0,			/* algnmnt, boundary */
222				BUS_SPACE_MAXADDR,	/* lowaddr */
223				BUS_SPACE_MAXADDR,	/* highaddr */
224				NULL, NULL,		/* filter, filterarg */
225				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
226				sc->mfi_max_sge,	/* nsegments */
227				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
228				BUS_DMA_ALLOCNOW,	/* flags */
229				busdma_lock_mutex,	/* lockfunc */
230				&sc->mfi_io_lock,	/* lockfuncarg */
231				&sc->mfi_buffer_dmat)) {
232		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
233		return (ENOMEM);
234	}
235
236	/*
237	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
238	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
239	 * entry, so the calculated size here will be will be 1 more than
240	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
241	 */
242	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
243	    sizeof(struct mfi_hwcomms);
244	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
245				1, 0,			/* algnmnt, boundary */
246				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
247				BUS_SPACE_MAXADDR,	/* highaddr */
248				NULL, NULL,		/* filter, filterarg */
249				commsz,			/* maxsize */
250				1,			/* msegments */
251				commsz,			/* maxsegsize */
252				0,			/* flags */
253				NULL, NULL,		/* lockfunc, lockarg */
254				&sc->mfi_comms_dmat)) {
255		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
256		return (ENOMEM);
257	}
258	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
259	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
260		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
261		return (ENOMEM);
262	}
263	bzero(sc->mfi_comms, commsz);
264	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
265	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
266
267	/*
268	 * Allocate DMA memory for the command frames.  Keep them in the
269	 * lower 4GB for efficiency.  Calculate the size of the commands at
270	 * the same time; each command is one 64 byte frame plus a set of
271         * additional frames for holding sg lists or other data.
272	 * The assumption here is that the SG list will start at the second
273	 * frame and not use the unused bytes in the first frame.  While this
274	 * isn't technically correct, it simplifies the calculation and allows
275	 * for command frames that might be larger than an mfi_io_frame.
276	 */
277	if (sizeof(bus_addr_t) == 8) {
278		sc->mfi_sge_size = sizeof(struct mfi_sg64);
279		sc->mfi_flags |= MFI_FLAGS_SG64;
280	} else {
281		sc->mfi_sge_size = sizeof(struct mfi_sg32);
282	}
283	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
284	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
285	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
286	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
287				64, 0,			/* algnmnt, boundary */
288				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
289				BUS_SPACE_MAXADDR,	/* highaddr */
290				NULL, NULL,		/* filter, filterarg */
291				framessz,		/* maxsize */
292				1,			/* nsegments */
293				framessz,		/* maxsegsize */
294				0,			/* flags */
295				NULL, NULL,		/* lockfunc, lockarg */
296				&sc->mfi_frames_dmat)) {
297		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
298		return (ENOMEM);
299	}
300	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
301	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
302		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
303		return (ENOMEM);
304	}
305	bzero(sc->mfi_frames, framessz);
306	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
307	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
308
309	/*
310	 * Allocate DMA memory for the frame sense data.  Keep them in the
311	 * lower 4GB for efficiency
312	 */
313	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
314	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
315				4, 0,			/* algnmnt, boundary */
316				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
317				BUS_SPACE_MAXADDR,	/* highaddr */
318				NULL, NULL,		/* filter, filterarg */
319				sensesz,		/* maxsize */
320				1,			/* nsegments */
321				sensesz,		/* maxsegsize */
322				0,			/* flags */
323				NULL, NULL,		/* lockfunc, lockarg */
324				&sc->mfi_sense_dmat)) {
325		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
326		return (ENOMEM);
327	}
328	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
329	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
330		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
331		return (ENOMEM);
332	}
333	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
334	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
335
336	if ((error = mfi_alloc_commands(sc)) != 0)
337		return (error);
338
339	if ((error = mfi_comms_init(sc)) != 0)
340		return (error);
341
342	if ((error = mfi_get_controller_info(sc)) != 0)
343		return (error);
344
345	mtx_lock(&sc->mfi_io_lock);
346	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
347		mtx_unlock(&sc->mfi_io_lock);
348		return (error);
349	}
350	mtx_unlock(&sc->mfi_io_lock);
351
352	/*
353	 * Set up the interrupt handler.  XXX This should happen in
354	 * mfi_pci.c
355	 */
356	sc->mfi_irq_rid = 0;
357	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
358	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
359		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
360		return (EINVAL);
361	}
362	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
363	    mfi_intr, sc, &sc->mfi_intr)) {
364		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
365		return (EINVAL);
366	}
367
368	/* Register a config hook to probe the bus for arrays */
369	sc->mfi_ich.ich_func = mfi_startup;
370	sc->mfi_ich.ich_arg = sc;
371	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
372		device_printf(sc->mfi_dev, "Cannot establish configuration "
373		    "hook\n");
374		return (EINVAL);
375	}
376
377	/*
378	 * Register a shutdown handler.
379	 */
380	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
381	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
382		device_printf(sc->mfi_dev, "Warning: shutdown event "
383		    "registration failed\n");
384	}
385
386	/*
387	 * Create the control device for doing management
388	 */
389	unit = device_get_unit(sc->mfi_dev);
390	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
391	    0640, "mfi%d", unit);
392	if (unit == 0)
393		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
394	if (sc->mfi_cdev != NULL)
395		sc->mfi_cdev->si_drv1 = sc;
396
397	/* Start the timeout watchdog */
398	callout_init(&sc->mfi_watchdog_callout, 1);
399	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
400	    mfi_timeout, sc);
401
402	return (0);
403}
404
static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * Carve the preallocated frame and sense DMA areas into
	 * per-command slots and create a busdma map for each command's
	 * data buffers.  Each successfully initialized command is placed
	 * on the free queue via mfi_release_command().  The loop stops
	 * at the first bus_dmamap_create() failure, leaving
	 * mfi_total_cmds at the number actually set up; 0 is returned
	 * either way.
	 */
	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = sc->mfi_max_fw_cmds;
	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		/* Each command owns one mfi_cmd_size chunk of the pool. */
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		/* The context is how mfi_intr() maps replies to commands. */
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0)
			mfi_release_command(cm);
		else
			break;
		sc->mfi_total_cmds++;
	}

	return (0);
}
440
static void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	/*
	 * Reset a command to a pristine state and return it to the free
	 * queue.  Only the fields that matter are cleared, and the frame's
	 * context field (used by the interrupt handler to look commands
	 * up) is deliberately preserved.
	 */
	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	/* Words 2-3 are skipped — presumably the context lives there;
	 * confirm against the mfi_frame_header layout in mfireg.h. */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;

	mfi_enqueue_free(cm);
}
473
474static int
475mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
476    void **bufp, size_t bufsize)
477{
478	struct mfi_command *cm;
479	struct mfi_dcmd_frame *dcmd;
480	void *buf = NULL;
481
482	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
483
484	cm = mfi_dequeue_free(sc);
485	if (cm == NULL)
486		return (EBUSY);
487
488	if ((bufsize > 0) && (bufp != NULL)) {
489		if (*bufp == NULL) {
490			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
491			if (buf == NULL) {
492				mfi_release_command(cm);
493				return (ENOMEM);
494			}
495			*bufp = buf;
496		} else {
497			buf = *bufp;
498		}
499	}
500
501	dcmd =  &cm->cm_frame->dcmd;
502	bzero(dcmd->mbox, MFI_MBOX_SIZE);
503	dcmd->header.cmd = MFI_CMD_DCMD;
504	dcmd->header.timeout = 0;
505	dcmd->header.flags = 0;
506	dcmd->header.data_len = bufsize;
507	dcmd->opcode = opcode;
508	cm->cm_sg = &dcmd->sgl;
509	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
510	cm->cm_flags = 0;
511	cm->cm_data = buf;
512	cm->cm_private = buf;
513	cm->cm_len = bufsize;
514
515	*cmp = cm;
516	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
517		*bufp = buf;
518	return (0);
519}
520
521static int
522mfi_comms_init(struct mfi_softc *sc)
523{
524	struct mfi_command *cm;
525	struct mfi_init_frame *init;
526	struct mfi_init_qinfo *qinfo;
527	int error;
528
529	mtx_lock(&sc->mfi_io_lock);
530	if ((cm = mfi_dequeue_free(sc)) == NULL)
531		return (EBUSY);
532
533	/*
534	 * Abuse the SG list area of the frame to hold the init_qinfo
535	 * object;
536	 */
537	init = &cm->cm_frame->init;
538	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
539
540	bzero(qinfo, sizeof(struct mfi_init_qinfo));
541	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
542	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
543	    offsetof(struct mfi_hwcomms, hw_reply_q);
544	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
545	    offsetof(struct mfi_hwcomms, hw_pi);
546	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
547	    offsetof(struct mfi_hwcomms, hw_ci);
548
549	init->header.cmd = MFI_CMD_INIT;
550	init->header.data_len = sizeof(struct mfi_init_qinfo);
551	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
552	cm->cm_data = NULL;
553	cm->cm_flags = MFI_CMD_POLLED;
554
555	if ((error = mfi_mapcmd(sc, cm)) != 0) {
556		device_printf(sc->mfi_dev, "failed to send init command\n");
557		mtx_unlock(&sc->mfi_io_lock);
558		return (error);
559	}
560	mfi_release_command(cm);
561	mtx_unlock(&sc->mfi_io_lock);
562
563	return (0);
564}
565
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	/*
	 * Issue a polled CTRL_GETINFO DCMD and derive mfi_max_io, the
	 * largest I/O (in sectors) the driver will issue.  If the query
	 * itself fails, a conservative value is computed from the SG
	 * limit instead and the failure is NOT propagated (error is
	 * reset to 0).
	 */
	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Fallback: one page per SG entry, less one, in sectors. */
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Use the smaller of the stripe-derived and absolute limits. */
	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
605
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	/*
	 * Issue a polled EVENT_GETINFO DCMD to fetch the controller's
	 * event log state.  A buffer is allocated through *log_state by
	 * mfi_dcmd_command(); the caller owns and must free it (it may
	 * be left allocated even on error).  The io lock must be held —
	 * mfi_dcmd_command() asserts it.
	 */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}
633
static int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	/*
	 * Register for asynchronous event notifications starting at
	 * seq_start.  When seq_start is 0, the controller's event log
	 * state is queried first and events logged since the last
	 * shutdown are fetched before registering.  Locale and class
	 * come from the hw.mfi.* tunables.  Always returns 0 unless the
	 * log-state query fails.
	 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.class  = mfi_event_class;

	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		/*
		 * Don't run them yet since we can't parse them.
		 * We can indirectly get the contents from
		 * the AEN mechanism via setting it lower then
		 * current.  The firmware will iterate through them.
		 */
		for (seq = log_state->shutdown_seq_num;
		     seq <= log_state->newest_seq_num; seq++) {
			mfi_get_entry(sc, seq);
		}
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	/*
	 * NOTE(review): on the seq_start != 0 path log_state is still
	 * NULL here; this relies on free(9) accepting NULL — confirm.
	 */
	free(log_state, M_MFIBUF);

	return 0;
}
670
static int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	/*
	 * Queue a command and sleep until it completes.  cm_complete is
	 * cleared so the generic completion path handles the command;
	 * presumably that path issues wakeup(cm) — confirm against
	 * mfi_complete().  Must be called with the io lock held;
	 * msleep() drops and reacquires it.  Returns the msleep()
	 * result (0 on normal wakeup).
	 */
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
}
682
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	/*
	 * Tear down everything mfi_attach() set up, in reverse order.
	 * Each resource is released only if it was actually created, so
	 * this is safe to call from any partially-completed attach.
	 */
	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	/* Destroy per-command dmamaps, then the command array itself. */
	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	/* Interrupt teardown must precede releasing the IRQ resource. */
	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* For each DMA area: unload the map, free the memory, kill the tag. */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock))
		mtx_destroy(&sc->mfi_io_lock);

	return;
}
742
743static void
744mfi_startup(void *arg)
745{
746	struct mfi_softc *sc;
747
748	sc = (struct mfi_softc *)arg;
749
750	config_intrhook_disestablish(&sc->mfi_ich);
751
752	mfi_enable_intr(sc);
753	mtx_lock(&sc->mfi_io_lock);
754	mfi_ldprobe(sc);
755	mtx_unlock(&sc->mfi_io_lock);
756}
757
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t status, pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Bail early if this interrupt is not ours (the IRQ is shared). */
	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return;

	/* Acknowledge the interrupt by writing the status back. */
	MFI_WRITE4(sc, MFI_OSTS, status);

	/*
	 * Drain the reply queue: walk from the consumer index to the
	 * producer index, completing the command named by each entry's
	 * context value.
	 */
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		cm = &sc->mfi_commands[context];
		mfi_remove_busy(cm);
		mfi_complete(sc, cm);
		/* The reply queue holds mfi_max_fw_cmds + 1 entries; wrap. */
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	/* Publish the new consumer index back to the firmware. */
	sc->mfi_comms->hw_ci = ci;

	/* Give defered I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	return;
}
796
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	/*
	 * Shutdown handler (registered on shutdown_final): abort any
	 * outstanding AEN command, then send a polled CTRL_SHUTDOWN
	 * DCMD with no data so the controller can flush its state.
	 * Returns 0 on success or an errno.
	 */
	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	/* The long-lived AEN command would otherwise never complete. */
	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}
827
static void
mfi_enable_intr(struct mfi_softc *sc)
{

	/*
	 * Write the outbound interrupt mask register to enable controller
	 * interrupts.  NOTE(review): 0x01 is a magic mask value for this
	 * adapter family — confirm against the firmware interface spec.
	 */
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
834
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	int error, i;

	/*
	 * Query the firmware for the list of logical drives (LD_GET_LIST)
	 * and attach a disk for each one via mfi_add_ld().  The command
	 * is issued asynchronously and waited on, so this must run in a
	 * sleepable context with the io lock held.  Failures are logged
	 * and swallowed.
	 */
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	/* The command ran; check the firmware's verdict. */
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++)
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
873
874static void
875mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
876{
877	switch (detail->arg_type) {
878	case MR_EVT_ARGS_NONE:
879		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
880		    detail->seq,
881		    detail->time,
882		    detail->class.members.locale,
883		    detail->class.members.class,
884		    detail->description
885		    );
886		break;
887	case MR_EVT_ARGS_CDB_SENSE:
888		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D"
889		    "Sense %*D\n: %s\n",
890		    detail->seq,
891		    detail->time,
892		    detail->class.members.locale,
893		    detail->class.members.class,
894		    detail->args.cdb_sense.pd.device_id,
895		    detail->args.cdb_sense.pd.enclosure_index,
896		    detail->args.cdb_sense.pd.slot_number,
897		    detail->args.cdb_sense.cdb_len,
898		    detail->args.cdb_sense.cdb,
899		    ":",
900		    detail->args.cdb_sense.sense_len,
901		    detail->args.cdb_sense.sense,
902		    ":",
903		    detail->description
904		    );
905		break;
906	case MR_EVT_ARGS_LD:
907		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
908		    "event: %s\n",
909		    detail->seq,
910		    detail->time,
911		    detail->class.members.locale,
912		    detail->class.members.class,
913		    detail->args.ld.ld_index,
914		    detail->args.ld.target_id,
915		    detail->description
916		    );
917		break;
918	case MR_EVT_ARGS_LD_COUNT:
919		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
920		    "count %lld: %s\n",
921		    detail->seq,
922		    detail->time,
923		    detail->class.members.locale,
924		    detail->class.members.class,
925		    detail->args.ld_count.ld.ld_index,
926		    detail->args.ld_count.ld.target_id,
927		    (long long)detail->args.ld_count.count,
928		    detail->description
929		    );
930		break;
931	case MR_EVT_ARGS_LD_LBA:
932		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
933		    "lba %lld: %s\n",
934		    detail->seq,
935		    detail->time,
936		    detail->class.members.locale,
937		    detail->class.members.class,
938		    detail->args.ld_lba.ld.ld_index,
939		    detail->args.ld_lba.ld.target_id,
940		    (long long)detail->args.ld_lba.lba,
941		    detail->description
942		    );
943		break;
944	case MR_EVT_ARGS_LD_OWNER:
945		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
946		    "owner changed: prior %d, new %d: %s\n",
947		    detail->seq,
948		    detail->time,
949		    detail->class.members.locale,
950		    detail->class.members.class,
951		    detail->args.ld_owner.ld.ld_index,
952		    detail->args.ld_owner.ld.target_id,
953		    detail->args.ld_owner.pre_owner,
954		    detail->args.ld_owner.new_owner,
955		    detail->description
956		    );
957		break;
958	case MR_EVT_ARGS_LD_LBA_PD_LBA:
959		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
960		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
961		    detail->seq,
962		    detail->time,
963		    detail->class.members.locale,
964		    detail->class.members.class,
965		    detail->args.ld_lba_pd_lba.ld.ld_index,
966		    detail->args.ld_lba_pd_lba.ld.target_id,
967		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
968		    detail->args.ld_lba_pd_lba.pd.device_id,
969		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
970		    detail->args.ld_lba_pd_lba.pd.slot_number,
971		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
972		    detail->description
973		    );
974		break;
975	case MR_EVT_ARGS_LD_PROG:
976		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
977		    "progress %d%% in %ds: %s\n",
978		    detail->seq,
979		    detail->time,
980		    detail->class.members.locale,
981		    detail->class.members.class,
982		    detail->args.ld_prog.ld.ld_index,
983		    detail->args.ld_prog.ld.target_id,
984		    detail->args.ld_prog.prog.progress/655,
985		    detail->args.ld_prog.prog.elapsed_seconds,
986		    detail->description
987		    );
988		break;
989	case MR_EVT_ARGS_LD_STATE:
990		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
991		    "state prior %d new %d: %s\n",
992		    detail->seq,
993		    detail->time,
994		    detail->class.members.locale,
995		    detail->class.members.class,
996		    detail->args.ld_state.ld.ld_index,
997		    detail->args.ld_state.ld.target_id,
998		    detail->args.ld_state.prev_state,
999		    detail->args.ld_state.new_state,
1000		    detail->description
1001		    );
1002		break;
1003	case MR_EVT_ARGS_LD_STRIP:
1004		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1005		    "strip %lld: %s\n",
1006		    detail->seq,
1007		    detail->time,
1008		    detail->class.members.locale,
1009		    detail->class.members.class,
1010		    detail->args.ld_strip.ld.ld_index,
1011		    detail->args.ld_strip.ld.target_id,
1012		    (long long)detail->args.ld_strip.strip,
1013		    detail->description
1014		    );
1015		break;
1016	case MR_EVT_ARGS_PD:
1017		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1018		    "event: %s\n",
1019		    detail->seq,
1020		    detail->time,
1021		    detail->class.members.locale,
1022		    detail->class.members.class,
1023		    detail->args.pd.device_id,
1024		    detail->args.pd.enclosure_index,
1025		    detail->args.pd.slot_number,
1026		    detail->description
1027		    );
1028		break;
1029	case MR_EVT_ARGS_PD_ERR:
1030		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1031		    "err %d: %s\n",
1032		    detail->seq,
1033		    detail->time,
1034		    detail->class.members.locale,
1035		    detail->class.members.class,
1036		    detail->args.pd_err.pd.device_id,
1037		    detail->args.pd_err.pd.enclosure_index,
1038		    detail->args.pd_err.pd.slot_number,
1039		    detail->args.pd_err.err,
1040		    detail->description
1041		    );
1042		break;
1043	case MR_EVT_ARGS_PD_LBA:
1044		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1045		    "lba %lld: %s\n",
1046		    detail->seq,
1047		    detail->time,
1048		    detail->class.members.locale,
1049		    detail->class.members.class,
1050		    detail->args.pd_lba.pd.device_id,
1051		    detail->args.pd_lba.pd.enclosure_index,
1052		    detail->args.pd_lba.pd.slot_number,
1053		    (long long)detail->args.pd_lba.lba,
1054		    detail->description
1055		    );
1056		break;
1057	case MR_EVT_ARGS_PD_LBA_LD:
1058		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1059		    "lba %lld VD %02d/%d: %s\n",
1060		    detail->seq,
1061		    detail->time,
1062		    detail->class.members.locale,
1063		    detail->class.members.class,
1064		    detail->args.pd_lba_ld.pd.device_id,
1065		    detail->args.pd_lba_ld.pd.enclosure_index,
1066		    detail->args.pd_lba_ld.pd.slot_number,
1067		    (long long)detail->args.pd_lba.lba,
1068		    detail->args.pd_lba_ld.ld.ld_index,
1069		    detail->args.pd_lba_ld.ld.target_id,
1070		    detail->description
1071		    );
1072		break;
1073	case MR_EVT_ARGS_PD_PROG:
1074		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1075		    "progress %d%% seconds %ds: %s\n",
1076		    detail->seq,
1077		    detail->time,
1078		    detail->class.members.locale,
1079		    detail->class.members.class,
1080		    detail->args.pd_prog.pd.device_id,
1081		    detail->args.pd_prog.pd.enclosure_index,
1082		    detail->args.pd_prog.pd.slot_number,
1083		    detail->args.pd_prog.prog.progress/655,
1084		    detail->args.pd_prog.prog.elapsed_seconds,
1085		    detail->description
1086		    );
1087		break;
1088	case MR_EVT_ARGS_PD_STATE:
1089		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1090		    "state prior %d new %d: %s\n",
1091		    detail->seq,
1092		    detail->time,
1093		    detail->class.members.locale,
1094		    detail->class.members.class,
1095		    detail->args.pd_prog.pd.device_id,
1096		    detail->args.pd_prog.pd.enclosure_index,
1097		    detail->args.pd_prog.pd.slot_number,
1098		    detail->args.pd_state.prev_state,
1099		    detail->args.pd_state.new_state,
1100		    detail->description
1101		    );
1102		break;
1103	case MR_EVT_ARGS_PCI:
1104		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x04%x 0x04%x "
1105		    "0x04%x 0x04%x: %s\n",
1106		    detail->seq,
1107		    detail->time,
1108		    detail->class.members.locale,
1109		    detail->class.members.class,
1110		    detail->args.pci.venderId,
1111		    detail->args.pci.deviceId,
1112		    detail->args.pci.subVenderId,
1113		    detail->args.pci.subDeviceId,
1114		    detail->description
1115		    );
1116		break;
1117	case MR_EVT_ARGS_RATE:
1118		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1119		    detail->seq,
1120		    detail->time,
1121		    detail->class.members.locale,
1122		    detail->class.members.class,
1123		    detail->args.rate,
1124		    detail->description
1125		    );
1126		break;
1127	case MR_EVT_ARGS_TIME:
1128		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1129		    "elapsed %ds: %s\n",
1130		    detail->seq,
1131		    detail->time,
1132		    detail->class.members.locale,
1133		    detail->class.members.class,
1134		    detail->args.time.rtc,
1135		    detail->args.time.elapsedSeconds,
1136		    detail->description
1137		    );
1138		break;
1139	case MR_EVT_ARGS_ECC:
1140		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1141		    detail->seq,
1142		    detail->time,
1143		    detail->class.members.locale,
1144		    detail->class.members.class,
1145		    detail->args.ecc.ecar,
1146		    detail->args.ecc.elog,
1147		    detail->args.ecc.str,
1148		    detail->description
1149		    );
1150		break;
1151	default:
1152		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1153		    detail->seq,
1154		    detail->time,
1155		    detail->class.members.locale,
1156		    detail->class.members.class,
1157		    detail->arg_type, detail->description
1158		    );
1159	}
1160}
1161
/*
 * Register an Asynchronous Event Notification (AEN) listener with the
 * firmware, starting at sequence number 'seq' for the class/locale mask
 * packed into 'locale'.  If an AEN command is already outstanding and its
 * class/locale already covers the request, this is a no-op; otherwise the
 * old command is aborted and a new one is issued with the merged mask.
 *
 * Returns 0 on success or an errno from command allocation.
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* mbox[1] of the outstanding DCMD holds its class/locale. */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		/*
		 * If the prior registration's class is at least as low
		 * (verbose) and its locale mask is a superset of the
		 * requested one, the existing command already covers us.
		 */
		if (prior_aen.members.class <= current_aen.members.class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			/* Merge the two requests, then abort the old one. */
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.class
			    < current_aen.members.class)
				current_aen.members.class =
				    prior_aen.members.class;
			mfi_abort(sc, sc->mfi_aen_cm);
		}
	}

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	if (error) {
		goto out;
	}

	/* mbox[0] carries the starting sequence, mbox[1] the class/locale. */
	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->mfi_aen_cm = cm;

	mfi_enqueue_ready(cm);
	mfi_startio(sc);

out:
	return (error);
}
1209
/*
 * Completion handler for the outstanding AEN command.  Decodes and prints
 * the event, signals any processes waiting via poll(2) or SIGIO, and
 * re-registers for the next sequence number unless the command was
 * aborted.  Runs with the io lock held; the lock is dropped briefly
 * around mfi_decode_evt().
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	/* A status of 0xff means the firmware never completed the frame. */
	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		/* Wake up anyone blocked in poll(2) on this device. */
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		/*
		 * XXX If this function is too expensive or is recursive, then
		 * events should be put onto a queue and processed later.
		 */
		mtx_unlock(&sc->mfi_io_lock);
		mfi_decode_evt(sc, detail);
		mtx_lock(&sc->mfi_io_lock);
		seq = detail->seq + 1;
		/* Deliver SIGIO to each registered listener, then drop it. */
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	/* Wake mfi_abort(), which sleeps on &sc->mfi_aen_cm. */
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		mfi_aen_setup(sc, seq);
	}
}
1263
1264/* Only do one event for now so we can easily iterate through them */
1265#define MAX_EVENTS 1
1266static int
1267mfi_get_entry(struct mfi_softc *sc, int seq)
1268{
1269	struct mfi_command *cm;
1270	struct mfi_dcmd_frame *dcmd;
1271	struct mfi_evt_list *el;
1272	int error;
1273	int i;
1274	int size;
1275
1276	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1277		return (EBUSY);
1278	}
1279
1280	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1281		* (MAX_EVENTS - 1);
1282	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1283	if (el == NULL) {
1284		mfi_release_command(cm);
1285		return (ENOMEM);
1286	}
1287
1288	dcmd = &cm->cm_frame->dcmd;
1289	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1290	dcmd->header.cmd = MFI_CMD_DCMD;
1291	dcmd->header.timeout = 0;
1292	dcmd->header.data_len = size;
1293	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1294	((uint32_t *)&dcmd->mbox)[0] = seq;
1295	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1296	cm->cm_sg = &dcmd->sgl;
1297	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1298	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1299	cm->cm_data = el;
1300	cm->cm_len = size;
1301
1302	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1303		device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1304		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1305		    MFI_SECTOR_LEN;
1306		free(el, M_MFIBUF);
1307		mfi_release_command(cm);
1308		return (0);
1309	}
1310
1311	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1312	    BUS_DMASYNC_POSTREAD);
1313	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1314
1315	if (dcmd->header.cmd_status != MFI_STAT_NOT_FOUND) {
1316		for (i = 0; i < el->count; i++) {
1317			if (seq + i == el->event[i].seq)
1318				mfi_decode_evt(sc, &el->event[i]);
1319		}
1320	}
1321
1322	free(cm->cm_data, M_MFIBUF);
1323	mfi_release_command(cm);
1324	return (0);
1325}
1326
1327static int
1328mfi_add_ld(struct mfi_softc *sc, int id)
1329{
1330	struct mfi_command *cm;
1331	struct mfi_dcmd_frame *dcmd = NULL;
1332	struct mfi_ld_info *ld_info = NULL;
1333	int error;
1334
1335	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1336
1337	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1338	    (void **)&ld_info, sizeof(*ld_info));
1339	if (error) {
1340		device_printf(sc->mfi_dev,
1341		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1342		if (ld_info)
1343			free(ld_info, M_MFIBUF);
1344		return (error);
1345	}
1346	cm->cm_flags = MFI_CMD_DATAIN;
1347	dcmd = &cm->cm_frame->dcmd;
1348	dcmd->mbox[0] = id;
1349	if (mfi_wait_command(sc, cm) != 0) {
1350		device_printf(sc->mfi_dev,
1351		    "Failed to get logical drive: %d\n", id);
1352		free(ld_info, M_MFIBUF);
1353		return (0);
1354	}
1355
1356	mfi_add_ld_complete(cm);
1357	return (0);
1358}
1359
/*
 * Second half of mfi_add_ld(): consumes the completed LD_GET_INFO command,
 * allocates a struct mfi_ld, and attaches an "mfid" child device for the
 * logical drive.  Takes ownership of ld_info (stored in the new ld on
 * success, freed on any failure path).  Called with the io lock held; the
 * lock is dropped around the Giant-protected bus attach.
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	struct mfi_ld *ld;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		free(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	/* Done with the command; only ld_info is carried forward. */
	mfi_release_command(cm);

	ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (ld == NULL) {
		device_printf(sc->mfi_dev, "Cannot allocate ld\n");
		free(ld_info, M_MFIBUF);
		return;
	}

	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		free(ld, M_MFIBUF);
		free(ld_info, M_MFIBUF);
		return;
	}

	ld->ld_id = ld_info->ld_config.properties.ld.v.target_id;
	ld->ld_disk = child;
	ld->ld_info = ld_info;

	/* The mfid child finds its ld through the ivars. */
	device_set_ivars(child, ld);
	device_set_desc(child, "MFI Logical Disk");
	/* Newbus attach requires Giant; drop the io lock across it. */
	mtx_unlock(&sc->mfi_io_lock);
	mtx_lock(&Giant);
	bus_generic_attach(sc->mfi_dev);
	mtx_unlock(&Giant);
	mtx_lock(&sc->mfi_io_lock);
}
1406
1407static struct mfi_command *
1408mfi_bio_command(struct mfi_softc *sc)
1409{
1410	struct mfi_io_frame *io;
1411	struct mfi_command *cm;
1412	struct bio *bio;
1413	int flags, blkcount;
1414
1415	if ((cm = mfi_dequeue_free(sc)) == NULL)
1416		return (NULL);
1417
1418	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1419		mfi_release_command(cm);
1420		return (NULL);
1421	}
1422
1423	io = &cm->cm_frame->io;
1424	switch (bio->bio_cmd & 0x03) {
1425	case BIO_READ:
1426		io->header.cmd = MFI_CMD_LD_READ;
1427		flags = MFI_CMD_DATAIN;
1428		break;
1429	case BIO_WRITE:
1430		io->header.cmd = MFI_CMD_LD_WRITE;
1431		flags = MFI_CMD_DATAOUT;
1432		break;
1433	default:
1434		panic("Invalid bio command");
1435	}
1436
1437	/* Cheat with the sector length to avoid a non-constant division */
1438	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1439	io->header.target_id = (uintptr_t)bio->bio_driver1;
1440	io->header.timeout = 0;
1441	io->header.flags = 0;
1442	io->header.sense_len = MFI_SENSE_LEN;
1443	io->header.data_len = blkcount;
1444	io->sense_addr_lo = cm->cm_sense_busaddr;
1445	io->sense_addr_hi = 0;
1446	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1447	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1448	cm->cm_complete = mfi_bio_complete;
1449	cm->cm_private = bio;
1450	cm->cm_data = bio->bio_data;
1451	cm->cm_len = bio->bio_bcount;
1452	cm->cm_sg = &io->sgl;
1453	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1454	cm->cm_flags = flags;
1455	return (cm);
1456}
1457
1458static void
1459mfi_bio_complete(struct mfi_command *cm)
1460{
1461	struct bio *bio;
1462	struct mfi_frame_header *hdr;
1463	struct mfi_softc *sc;
1464
1465	bio = cm->cm_private;
1466	hdr = &cm->cm_frame->header;
1467	sc = cm->cm_sc;
1468
1469	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1470		bio->bio_flags |= BIO_ERROR;
1471		bio->bio_error = EIO;
1472		device_printf(sc->mfi_dev, "I/O error, status= %d "
1473		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1474		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1475	}
1476
1477	mfi_release_command(cm);
1478	mfi_disk_complete(bio);
1479}
1480
1481void
1482mfi_startio(struct mfi_softc *sc)
1483{
1484	struct mfi_command *cm;
1485
1486	for (;;) {
1487		/* Don't bother if we're short on resources */
1488		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1489			break;
1490
1491		/* Try a command that has already been prepared */
1492		cm = mfi_dequeue_ready(sc);
1493
1494		/* Nope, so look for work on the bioq */
1495		if (cm == NULL)
1496			cm = mfi_bio_command(sc);
1497
1498		/* No work available, so exit */
1499		if (cm == NULL)
1500			break;
1501
1502		/* Send the command to the controller */
1503		if (mfi_mapcmd(sc, cm) != 0) {
1504			mfi_requeue_ready(cm);
1505			break;
1506		}
1507	}
1508}
1509
/*
 * Map a command's data buffer for DMA (if any) and send it to the
 * controller.  For data commands, bus_dmamap_load() invokes mfi_data_cb()
 * -- which builds the S/G list and sends the frame -- either immediately
 * or, if deferred (EINPROGRESS), later when bounce resources free up; in
 * the deferred case the queue is frozen so no further commands are issued
 * in the meantime.  Commands without data are sent directly.
 * Called with the io lock held.
 */
static int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (cm->cm_data != NULL) {
		/* Polled commands cannot sleep waiting for resources. */
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			/* mfi_data_cb() will fire once resources arrive. */
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		error = mfi_send_frame(sc, cm);
	}

	return (error);
}
1531
/*
 * bus_dmamap_load() callback: fill in the command's scatter/gather list
 * (32- or 64-bit entries depending on the controller's MFI_FLAGS_SG64
 * capability), set the DMA direction flags on both the map sync and the
 * frame header, account for the extra frames consumed by the S/G entries,
 * and post the frame to the controller.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, dir;

	/* NOTE(review): on a load error the command is silently dropped
	 * here (never sent, never completed) -- confirm callers handle it. */
	if (error)
		return;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	/* Build the S/G list in whichever format the controller expects. */
	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg32[i].addr = segs[i].ds_addr;
			sgl->sg32[i].len = segs[i].ds_len;
		}
	} else {
		for (i = 0; i < nsegs; i++) {
			sgl->sg64[i].addr = segs[i].ds_addr;
			sgl->sg64[i].len = segs[i].ds_len;
		}
		hdr->flags |= MFI_FRAME_SGL64;
	}
	hdr->sg_count = nsegs;

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	mfi_send_frame(sc, cm);

	return;
}
1588
/*
 * Post a command's frame to the controller's inbound queue port.
 * Non-polled commands are timestamped and moved to the busy queue for the
 * interrupt path to complete.  Polled commands are marked incomplete
 * (status 0xff) and busy-waited on for up to MFI_POLL_TIMEOUT_SECS.
 *
 * Returns 0 on success (or successful posting of an async command) and
 * ETIMEDOUT if a polled command never completes.
 */
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;	/* polled in 1ms steps below */

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		/* 0xff flags the frame incomplete until firmware writes it. */
		hdr->cmd_status = 0xff;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
	    cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == 0xff) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == 0xff) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
			      "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
1642
1643static void
1644mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1645{
1646	int dir;
1647
1648	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1649		dir = 0;
1650		if (cm->cm_flags & MFI_CMD_DATAIN)
1651			dir |= BUS_DMASYNC_POSTREAD;
1652		if (cm->cm_flags & MFI_CMD_DATAOUT)
1653			dir |= BUS_DMASYNC_POSTWRITE;
1654
1655		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1656		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1657		cm->cm_flags &= ~MFI_CMD_MAPPED;
1658	}
1659
1660	if (cm->cm_complete != NULL)
1661		cm->cm_complete(cm);
1662	else
1663		wakeup(cm);
1664}
1665
/*
 * Issue a polled MFI_CMD_ABORT for cm_abort, then wait (up to 5 sleeps of
 * 5 seconds each) for the aborted command to complete.  As written this
 * is specific to aborting the outstanding AEN command: it sets
 * cm_aen_abort on sc->mfi_aen_cm and sleeps on &sc->mfi_aen_cm, which
 * mfi_aen_complete() wakes.  Called with the io lock held.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	/* Identify the victim command by context and frame bus address. */
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi = 0;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	/* NOTE(review): assumes cm_abort == sc->mfi_aen_cm -- confirm. */
	sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_release_command(cm);

	/* Wait for mfi_aen_complete() to clear mfi_aen_cm and wake us. */
	while (i < 5 && sc->mfi_aen_cm != NULL) {
		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
		i++;
	}

	return (0);
}
1699
1700int
1701mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1702{
1703	struct mfi_command *cm;
1704	struct mfi_io_frame *io;
1705	int error;
1706
1707	if ((cm = mfi_dequeue_free(sc)) == NULL)
1708		return (EBUSY);
1709
1710	io = &cm->cm_frame->io;
1711	io->header.cmd = MFI_CMD_LD_WRITE;
1712	io->header.target_id = id;
1713	io->header.timeout = 0;
1714	io->header.flags = 0;
1715	io->header.sense_len = MFI_SENSE_LEN;
1716	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1717	io->sense_addr_lo = cm->cm_sense_busaddr;
1718	io->sense_addr_hi = 0;
1719	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1720	io->lba_lo = lba & 0xffffffff;
1721	cm->cm_data = virt;
1722	cm->cm_len = len;
1723	cm->cm_sg = &io->sgl;
1724	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1725	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1726
1727	error = mfi_mapcmd(sc, cm);
1728	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1729	    BUS_DMASYNC_POSTWRITE);
1730	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1731	mfi_release_command(cm);
1732
1733	return (error);
1734}
1735
1736static int
1737mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1738{
1739	struct mfi_softc *sc;
1740
1741	sc = dev->si_drv1;
1742
1743	mtx_lock(&sc->mfi_io_lock);
1744	sc->mfi_flags |= MFI_FLAGS_OPEN;
1745	mtx_unlock(&sc->mfi_io_lock);
1746
1747	return (0);
1748}
1749
1750static int
1751mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1752{
1753	struct mfi_softc *sc;
1754	struct mfi_aen *mfi_aen_entry, *tmp;
1755
1756	sc = dev->si_drv1;
1757
1758	mtx_lock(&sc->mfi_io_lock);
1759	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1760
1761	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1762		if (mfi_aen_entry->p == curproc) {
1763			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1764			    aen_link);
1765			free(mfi_aen_entry, M_MFIBUF);
1766		}
1767	}
1768	mtx_unlock(&sc->mfi_io_lock);
1769	return (0);
1770}
1771
/*
 * ioctl handler for the mfi control device.  Supports queue statistics
 * (MFIIO_STATS), pass-through firmware commands from userland (MFI_CMD),
 * AEN registration (MFI_SET_AEN), and Linux-compatibility shims that look
 * up the adapter named in the request and re-dispatch to
 * mfi_linux_ioctl_int().
 */
static int
mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context;
	uint8_t *sense_ptr;
	uint8_t *data = NULL, *temp;
	int i;
	int error;

	sc = dev->si_drv1;
	error = 0;

	switch (cmd) {
	case MFIIO_STATS:
		/* Copy the requested queue's statistics back to the caller. */
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFI_CMD:
		/* Pass a user-constructed frame through to the firmware. */
		ioc = (struct mfi_ioc_packet *)arg;

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		      ioc->mfi_sgl_off); /* Linux can do 2 frames ? */
		cm->cm_total_frame_size = ioc->mfi_sgl_off;
		/* The S/G list starts where the user's frame copy ended. */
		cm->cm_sg =
		    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
			| MFI_CMD_POLLED;
		cm->cm_len = cm->cm_frame->header.data_len;
		/* Bounce buffer for the user's scattered data. */
		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
					    M_WAITOK | M_ZERO);
		/* NOTE(review): M_WAITOK cannot return NULL, so this check
		 * is dead code. */
		if (cm->cm_data == NULL) {
			device_printf(sc->mfi_dev, "Malloc failed\n");
			goto out;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's iovecs into the bounce buffer. */
		temp = data;
		for (i = 0; i < ioc->mfi_sge_count; i++) {
			error = copyin(ioc->mfi_sgl[i].iov_base,
			       temp,
			       ioc->mfi_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy in failed\n");
				goto out;
			}
			temp = &temp[ioc->mfi_sgl[i].iov_len];
		}

		/* Run the command (polled) under the io lock. */
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter the results back to the user's iovecs. */
		temp = data;
		for (i = 0; i < ioc->mfi_sge_count; i++) {
			error = copyout(temp,
				ioc->mfi_sgl[i].iov_base,
				ioc->mfi_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
			temp = &temp[ioc->mfi_sgl[i].iov_len];
		}

		if (ioc->mfi_sense_len) {
			/* copy out sense */
			/* NOTE(review): sense is copied over the start of the
			 * user's frame (raw[0]), not to an mfi_sense_off
			 * offset -- confirm this matches the ioctl ABI. */
			sense_ptr = &((struct mfi_ioc_packet*)arg)
			    ->mfi_frame.raw[0];
			error = copyout(cm->cm_sense, sense_ptr,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			switch (cm->cm_frame->dcmd.opcode) {
			case MFI_DCMD_CFG_CLEAR:
			case MFI_DCMD_CFG_ADD:
/*
				mfi_ldrescan(sc);
*/
				break;
			}
		}
out:
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		break;
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);

		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_packet l_ioc;
			int adapter;

			/* Re-dispatch to the adapter named in the packet. */
			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_ioc, sizeof(l_ioc));
			if (error)
				return (error);
			adapter = l_ioc.lioc_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_aen l_aen;
			int adapter;

			/* Re-dispatch to the adapter named in the request. */
			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			error = copyin(arg, &l_aen, sizeof(l_aen));
			if (error)
				return (error);
			adapter = l_aen.laen_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag, td));
			break;
		}
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
1968
/*
 * Backend for the Linux-compatibility ioctls.  MFI_LINUX_CMD_2 mirrors
 * the native MFI_CMD path but uses the Linux packet layout (iov pointers
 * carried as integers) and copies status/sense back with copyout().
 * MFI_LINUX_SET_AEN_2 records the calling process for SIGIO delivery and
 * registers the AEN with the firmware.
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	uint8_t *sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	void *temp_convert;
	int i;
	int error;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Bound the S/G count before trusting it below. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		mtx_lock(&sc->mfi_io_lock);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			mtx_unlock(&sc->mfi_io_lock);
			return (EBUSY);
		}
		mtx_unlock(&sc->mfi_io_lock);

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
		cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
		/* The S/G list starts where the user's frame copy ended. */
		cm->cm_sg =
		    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
			| MFI_CMD_POLLED;
		cm->cm_len = cm->cm_frame->header.data_len;
		/* Bounce buffer; M_WAITOK cannot fail. */
		cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
					    M_WAITOK | M_ZERO);

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's iovecs into the bounce buffer. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			/* Linux passes iov_base as an integer type. */
			temp_convert =
			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
			error = copyin(temp_convert,
			       temp,
			       l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy in failed\n");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		/* Run the command (polled) under the io lock. */
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			mtx_unlock(&sc->mfi_io_lock);
			goto out;
		}

		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
		mtx_unlock(&sc->mfi_io_lock);

		/* Scatter the results back to the user's iovecs. */
		temp = data;
		for (i = 0; i < l_ioc.lioc_sge_count; i++) {
			temp_convert =
			    (void *)(uintptr_t)l_ioc.lioc_sgl[i].iov_base;
			error = copyout(temp,
				temp_convert,
				l_ioc.lioc_sgl[i].iov_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
			temp = &temp[l_ioc.lioc_sgl[i].iov_len];
		}

		if (l_ioc.lioc_sense_len) {
			/* copy out sense */
			/* NOTE(review): sense is copied over the start of the
			 * user's frame (raw[0]) -- confirm against the Linux
			 * megaraid_sas ioctl ABI. */
			sense_ptr = &((struct mfi_linux_ioc_packet*)arg)
			    ->lioc_frame.raw[0];
			error = copyout(cm->cm_sense, sense_ptr,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Report the firmware completion status byte. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			switch (cm->cm_frame->dcmd.opcode) {
			case MFI_DCMD_CFG_CLEAR:
			case MFI_DCMD_CFG_ADD:
				/* mfi_ldrescan(sc); */
				break;
			}
		}
out:
		if (data)
			free(data, M_MFIBUF);
		if (cm) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		/* M_WAITOK cannot fail, so the NULL check below is dead. */
		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		mtx_lock(&sc->mfi_io_lock);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		if (error != 0) {
			/* Registration failed; undo the listener entry. */
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			free(mfi_aen_entry, M_MFIBUF);
		}
		mtx_unlock(&sc->mfi_io_lock);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
2139
2140static int
2141mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2142{
2143	struct mfi_softc *sc;
2144	int revents = 0;
2145
2146	sc = dev->si_drv1;
2147
2148	if (poll_events & (POLLIN | POLLRDNORM)) {
2149		if (sc->mfi_aen_triggered != 0) {
2150			revents |= poll_events & (POLLIN | POLLRDNORM);
2151			sc->mfi_aen_triggered = 0;
2152		}
2153		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2154			revents |= POLLERR;
2155		}
2156	}
2157
2158	if (revents == 0) {
2159		if (poll_events & (POLLIN | POLLRDNORM)) {
2160			sc->mfi_poll_waiting = 1;
2161			selrecord(td, &sc->mfi_select);
2162		}
2163	}
2164
2165	return revents;
2166}
2167
2168
2169static void
2170mfi_dump_all(void)
2171{
2172	struct mfi_softc *sc;
2173	struct mfi_command *cm;
2174	devclass_t dc;
2175	time_t deadline;
2176	int timedout;
2177	int i;
2178
2179	dc = devclass_find("mfi");
2180	if (dc == NULL) {
2181		printf("No mfi dev class\n");
2182		return;
2183	}
2184
2185	for (i = 0; ; i++) {
2186		sc = devclass_get_softc(dc, i);
2187		if (sc == NULL)
2188			break;
2189		device_printf(sc->mfi_dev, "Dumping\n\n");
2190		timedout = 0;
2191		deadline = time_uptime - MFI_CMD_TIMEOUT;
2192		mtx_lock(&sc->mfi_io_lock);
2193		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2194			if (cm->cm_timestamp < deadline) {
2195				device_printf(sc->mfi_dev,
2196				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2197				    (int)(time_uptime - cm->cm_timestamp));
2198				MFI_PRINT_CMD(cm);
2199				timedout++;
2200			}
2201		}
2202
2203#if 0
2204		if (timedout)
2205			MFI_DUMP_CMDS(SC);
2206#endif
2207
2208		mtx_unlock(&sc->mfi_io_lock);
2209	}
2210
2211	return;
2212}
2213
2214static void
2215mfi_timeout(void *data)
2216{
2217	struct mfi_softc *sc = (struct mfi_softc *)data;
2218	struct mfi_command *cm;
2219	time_t deadline;
2220	int timedout = 0;
2221
2222	deadline = time_uptime - MFI_CMD_TIMEOUT;
2223	mtx_lock(&sc->mfi_io_lock);
2224	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2225		if (sc->mfi_aen_cm == cm)
2226			continue;
2227		if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
2228			device_printf(sc->mfi_dev,
2229			    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2230			    (int)(time_uptime - cm->cm_timestamp));
2231			MFI_PRINT_CMD(cm);
2232			MFI_VALIDATE_CMD(sc, cm);
2233			timedout++;
2234		}
2235	}
2236
2237#if 0
2238	if (timedout)
2239		MFI_DUMP_CMDS(SC);
2240#endif
2241
2242	mtx_unlock(&sc->mfi_io_lock);
2243
2244	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
2245	    mfi_timeout, sc);
2246
2247	if (0)
2248		mfi_dump_all();
2249	return;
2250}
2251