mfi.c revision 178968
1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/*-
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 *    notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 *    notice, this list of conditions and the following disclaimer in the
38 *    documentation and/or other materials provided with the distribution.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#include <sys/cdefs.h>
54__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 178968 2008-05-12 14:09:19Z scottl $");
55
56#include "opt_mfi.h"
57
58#include <sys/param.h>
59#include <sys/systm.h>
60#include <sys/sysctl.h>
61#include <sys/malloc.h>
62#include <sys/kernel.h>
63#include <sys/poll.h>
64#include <sys/selinfo.h>
65#include <sys/bus.h>
66#include <sys/conf.h>
67#include <sys/eventhandler.h>
68#include <sys/rman.h>
69#include <sys/bus_dma.h>
70#include <sys/bio.h>
71#include <sys/ioccom.h>
72#include <sys/uio.h>
73#include <sys/proc.h>
74#include <sys/signalvar.h>
75
76#include <machine/bus.h>
77#include <machine/resource.h>
78
79#include <dev/mfi/mfireg.h>
80#include <dev/mfi/mfi_ioctl.h>
81#include <dev/mfi/mfivar.h>
82
83static int	mfi_alloc_commands(struct mfi_softc *);
84static int	mfi_comms_init(struct mfi_softc *);
85static int	mfi_wait_command(struct mfi_softc *, struct mfi_command *);
86static int	mfi_get_controller_info(struct mfi_softc *);
87static int	mfi_get_log_state(struct mfi_softc *,
88		    struct mfi_evt_log_state **);
89static int	mfi_get_entry(struct mfi_softc *, int);
90static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
91		    uint32_t, void **, size_t);
92static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
93static void	mfi_startup(void *arg);
94static void	mfi_intr(void *arg);
95static void	mfi_ldprobe(struct mfi_softc *sc);
96static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
97static void	mfi_aen_complete(struct mfi_command *);
98static int	mfi_aen_setup(struct mfi_softc *, uint32_t);
99static int	mfi_add_ld(struct mfi_softc *sc, int);
100static void	mfi_add_ld_complete(struct mfi_command *);
101static struct mfi_command * mfi_bio_command(struct mfi_softc *);
102static void	mfi_bio_complete(struct mfi_command *);
103static int	mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
104static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
105static void	mfi_complete(struct mfi_softc *, struct mfi_command *);
106static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
107static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
108static void	mfi_timeout(void *);
109static int	mfi_user_command(struct mfi_softc *,
110		    struct mfi_ioc_passthru *);
111static void 	mfi_enable_intr_xscale(struct mfi_softc *sc);
112static void 	mfi_enable_intr_ppc(struct mfi_softc *sc);
113static int32_t 	mfi_read_fw_status_xscale(struct mfi_softc *sc);
114static int32_t 	mfi_read_fw_status_ppc(struct mfi_softc *sc);
115static int 	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
116static int 	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
117static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
118static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt);
119
120SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
121static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
122TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
123SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
124            0, "event message locale");
125
126static int	mfi_event_class = MFI_EVT_CLASS_INFO;
127TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
128SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
129          0, "event message class");
130
131static int	mfi_max_cmds = 128;
132TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
133SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
134	   0, "Max commands");
135
136/* Management interface */
137static d_open_t		mfi_open;
138static d_close_t	mfi_close;
139static d_ioctl_t	mfi_ioctl;
140static d_poll_t		mfi_poll;
141
142static struct cdevsw mfi_cdevsw = {
143	.d_version = 	D_VERSION,
144	.d_flags =	0,
145	.d_open = 	mfi_open,
146	.d_close =	mfi_close,
147	.d_ioctl =	mfi_ioctl,
148	.d_poll =	mfi_poll,
149	.d_name =	"mfi",
150};
151
152MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
153
154#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
155
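/*
 * Controller-specific register helpers.  mfi_attach() picks the xscale
 * variants for 1064R-class controllers and the ppc variants otherwise,
 * and stores them in the softc as mfi_enable_intr, mfi_read_fw_status,
 * mfi_check_clear_intr, and mfi_issue_cmd.
 */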
156static void
157mfi_enable_intr_xscale(struct mfi_softc *sc)
158{
159	MFI_WRITE4(sc, MFI_OMSK, 0x01);
160}
161
162static void
163mfi_enable_intr_ppc(struct mfi_softc *sc)
164{
165	MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
166	MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
167}
168
169static int32_t
170mfi_read_fw_status_xscale(struct mfi_softc *sc)
171{
172	return MFI_READ4(sc, MFI_OMSG0);
173}
174
175static int32_t
176mfi_read_fw_status_ppc(struct mfi_softc *sc)
177{
178	return MFI_READ4(sc, MFI_OSP0);
179}
180
181static int
182mfi_check_clear_intr_xscale(struct mfi_softc *sc)
183{
184	int32_t status;
185
186	status = MFI_READ4(sc, MFI_OSTS);
187	if ((status & MFI_OSTS_INTR_VALID) == 0)
188		return 1;
189
190	MFI_WRITE4(sc, MFI_OSTS, status);
191	return 0;
192}
193
194static int
195mfi_check_clear_intr_ppc(struct mfi_softc *sc)
196{
197	int32_t status;
198
199	status = MFI_READ4(sc, MFI_OSTS);
200	if (!status)
201		return 1;
202
203	MFI_WRITE4(sc, MFI_ODCR0, status);
204	return 0;
205}
206
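/*
 * Post a command's frame bus address to the inbound queue port, with the
 * extra-frame count folded into the low bits.  The encoding differs per
 * controller family; see the comment in mfi_send_frame() for the rationale.
 */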
207static void
208mfi_issue_cmd_xscale(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
209{
210	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
211}
212
213static void
214mfi_issue_cmd_ppc(struct mfi_softc *sc, uint32_t bus_add, uint32_t frame_cnt)
215{
216	MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
217}
218
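/*
 * Poll the firmware state register and nudge the firmware toward the
 * READY state, waiting up to max_wait seconds in each intermediate state.
 * Returns ENXIO if the firmware faults, reports an unknown state, or
 * never leaves its current state.
 */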
219static int
220mfi_transition_firmware(struct mfi_softc *sc)
221{
222	int32_t fw_state, cur_state;
223	int max_wait, i;
224
225	fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
226	while (fw_state != MFI_FWSTATE_READY) {
227		if (bootverbose)
228			device_printf(sc->mfi_dev, "Waiting for firmware to "
229			"become ready\n");
230		cur_state = fw_state;
231		switch (fw_state) {
232		case MFI_FWSTATE_FAULT:
233			device_printf(sc->mfi_dev, "Firmware fault\n");
234			return (ENXIO);
235		case MFI_FWSTATE_WAIT_HANDSHAKE:
236			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
237			max_wait = 2;
238			break;
239		case MFI_FWSTATE_OPERATIONAL:
240			MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
241			max_wait = 10;
242			break;
243		case MFI_FWSTATE_UNDEFINED:
244		case MFI_FWSTATE_BB_INIT:
245			max_wait = 2;
246			break;
247		case MFI_FWSTATE_FW_INIT:
248		case MFI_FWSTATE_DEVICE_SCAN:
249		case MFI_FWSTATE_FLUSH_CACHE:
250			max_wait = 20;
251			break;
252		default:
253			device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
254			    fw_state);
255			return (ENXIO);
256		}
257		for (i = 0; i < (max_wait * 10); i++) {
258			fw_state = sc->mfi_read_fw_status(sc) & MFI_FWSTATE_MASK;
259			if (fw_state == cur_state)
260				DELAY(100000);
261			else
262				break;
263		}
264		if (fw_state == cur_state) {
265			device_printf(sc->mfi_dev, "firmware stuck in state "
266			    "%#x\n", fw_state);
267			return (ENXIO);
268		}
269	}
270	return (0);
271}
272
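/*
 * busdma callback that records the bus address of a single-segment static
 * allocation; used below for the comms area, frame pool, and sense buffers.
 */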
273static void
274mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
275{
276	uint32_t *addr;
277
278	addr = arg;
279	*addr = segs[0].ds_addr;
280}
281
282int
283mfi_attach(struct mfi_softc *sc)
284{
285	uint32_t status;
286	int error, commsz, framessz, sensesz;
287	int frames, unit, max_fw_sge;
288	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver 2.00\n");
289
290	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
291	sx_init(&sc->mfi_config_lock, "MFI config");
292	TAILQ_INIT(&sc->mfi_ld_tqh);
293	TAILQ_INIT(&sc->mfi_aen_pids);
294	TAILQ_INIT(&sc->mfi_cam_ccbq);
295
296	mfi_initq_free(sc);
297	mfi_initq_ready(sc);
298	mfi_initq_busy(sc);
299	mfi_initq_bio(sc);
300
301	if (sc->mfi_flags & MFI_FLAGS_1064R) {
302		sc->mfi_enable_intr = mfi_enable_intr_xscale;
303		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
304		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
305		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
306	}
307	else {
308		sc->mfi_enable_intr = mfi_enable_intr_ppc;
309		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
310		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
311		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
312	}
313
314
315	/* Before we get too far, see if the firmware is working */
316	if ((error = mfi_transition_firmware(sc)) != 0) {
317		device_printf(sc->mfi_dev, "Firmware not in READY state, "
318		    "error %d\n", error);
319		return (ENXIO);
320	}
321
322	/*
323	 * Get information needed for sizing the contiguous memory for the
324	 * frame pool.  Size down the sgl parameter since we know that
325	 * we will never need more than what's required for MAXPHYS.
326	 * It would be nice if these constants were available at runtime
327	 * instead of compile time.
328	 */
329	status = sc->mfi_read_fw_status(sc);
330	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
331	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
332	sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
333
334	/*
335	 * Create the dma tag for data buffers.  Used both for block I/O
336	 * and for various internal data queries.
337	 */
338	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
339				1, 0,			/* algnmnt, boundary */
340				BUS_SPACE_MAXADDR,	/* lowaddr */
341				BUS_SPACE_MAXADDR,	/* highaddr */
342				NULL, NULL,		/* filter, filterarg */
343				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
344				sc->mfi_max_sge,	/* nsegments */
345				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
346				BUS_DMA_ALLOCNOW,	/* flags */
347				busdma_lock_mutex,	/* lockfunc */
348				&sc->mfi_io_lock,	/* lockfuncarg */
349				&sc->mfi_buffer_dmat)) {
350		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
351		return (ENOMEM);
352	}
353
354	/*
355	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
356	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
357	 * entry, so the calculated size here will be 1 more than
358	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
359	 */
360	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
361	    sizeof(struct mfi_hwcomms);
362	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
363				1, 0,			/* algnmnt, boundary */
364				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
365				BUS_SPACE_MAXADDR,	/* highaddr */
366				NULL, NULL,		/* filter, filterarg */
367				commsz,			/* maxsize */
368				1,			/* nsegments */
369				commsz,			/* maxsegsize */
370				0,			/* flags */
371				NULL, NULL,		/* lockfunc, lockarg */
372				&sc->mfi_comms_dmat)) {
373		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
374		return (ENOMEM);
375	}
376	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
377	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
378		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
379		return (ENOMEM);
380	}
381	bzero(sc->mfi_comms, commsz);
382	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
383	    sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
384
385	/*
386	 * Allocate DMA memory for the command frames.  Keep them in the
387	 * lower 4GB for efficiency.  Calculate the size of the commands at
388	 * the same time; each command is one 64 byte frame plus a set of
389	 * additional frames for holding sg lists or other data.
390	 * The assumption here is that the SG list will start at the second
391	 * frame and not use the unused bytes in the first frame.  While this
392	 * isn't technically correct, it simplifies the calculation and allows
393	 * for command frames that might be larger than an mfi_io_frame.
394	 */
395	if (sizeof(bus_addr_t) == 8) {
396		sc->mfi_sge_size = sizeof(struct mfi_sg64);
397		sc->mfi_flags |= MFI_FLAGS_SG64;
398	} else {
399		sc->mfi_sge_size = sizeof(struct mfi_sg32);
400	}
401	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
402	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
403	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
404	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
405				64, 0,			/* algnmnt, boundary */
406				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
407				BUS_SPACE_MAXADDR,	/* highaddr */
408				NULL, NULL,		/* filter, filterarg */
409				framessz,		/* maxsize */
410				1,			/* nsegments */
411				framessz,		/* maxsegsize */
412				0,			/* flags */
413				NULL, NULL,		/* lockfunc, lockarg */
414				&sc->mfi_frames_dmat)) {
415		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
416		return (ENOMEM);
417	}
418	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
419	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
420		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
421		return (ENOMEM);
422	}
423	bzero(sc->mfi_frames, framessz);
424	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
425	    sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
426
427	/*
428	 * Allocate DMA memory for the frame sense data.  Keep them in the
429	 * lower 4GB for efficiency
430	 */
431	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
432	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
433				4, 0,			/* algnmnt, boundary */
434				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
435				BUS_SPACE_MAXADDR,	/* highaddr */
436				NULL, NULL,		/* filter, filterarg */
437				sensesz,		/* maxsize */
438				1,			/* nsegments */
439				sensesz,		/* maxsegsize */
440				0,			/* flags */
441				NULL, NULL,		/* lockfunc, lockarg */
442				&sc->mfi_sense_dmat)) {
443		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
444		return (ENOMEM);
445	}
446	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
447	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
448		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
449		return (ENOMEM);
450	}
451	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
452	    sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
453
454	if ((error = mfi_alloc_commands(sc)) != 0)
455		return (error);
456
457	if ((error = mfi_comms_init(sc)) != 0)
458		return (error);
459
460	if ((error = mfi_get_controller_info(sc)) != 0)
461		return (error);
462
463	mtx_lock(&sc->mfi_io_lock);
464	if ((error = mfi_aen_setup(sc, 0)) != 0) {
465		mtx_unlock(&sc->mfi_io_lock);
466		return (error);
467	}
468	mtx_unlock(&sc->mfi_io_lock);
469
470	/*
471	 * Set up the interrupt handler.  XXX This should happen in
472	 * mfi_pci.c
473	 */
474	sc->mfi_irq_rid = 0;
475	if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
476	    &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
477		device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
478		return (EINVAL);
479	}
480	if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
481	    NULL, mfi_intr, sc, &sc->mfi_intr)) {
482		device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
483		return (EINVAL);
484	}
485
486	/* Register a config hook to probe the bus for arrays */
487	sc->mfi_ich.ich_func = mfi_startup;
488	sc->mfi_ich.ich_arg = sc;
489	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
490		device_printf(sc->mfi_dev, "Cannot establish configuration "
491		    "hook\n");
492		return (EINVAL);
493	}
494
495	/*
496	 * Register a shutdown handler.
497	 */
498	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
499	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
500		device_printf(sc->mfi_dev, "Warning: shutdown event "
501		    "registration failed\n");
502	}
503
504	/*
505	 * Create the control device for doing management
506	 */
507	unit = device_get_unit(sc->mfi_dev);
508	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
509	    0640, "mfi%d", unit);
510	if (unit == 0)
511		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
512	if (sc->mfi_cdev != NULL)
513		sc->mfi_cdev->si_drv1 = sc;
514	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
515	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
516	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
517	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
518	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
519	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
520	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
521	    &sc->mfi_keep_deleted_volumes, 0,
522	    "Don't detach the mfid device for a busy volume that is deleted");
523
524	device_add_child(sc->mfi_dev, "mfip", -1);
525	bus_generic_attach(sc->mfi_dev);
526
527	/* Start the timeout watchdog */
528	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
529	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
530	    mfi_timeout, sc);
531
532	return (0);
533}
534
535static int
536mfi_alloc_commands(struct mfi_softc *sc)
537{
538	struct mfi_command *cm;
539	int i, ncmds;
540
541	/*
542	 * XXX Should we allocate all the commands up front, or allocate on
543	 * demand later like 'aac' does?
544	 */
545	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
546	if (bootverbose)
547		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
548		   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
549
550	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
551	    M_WAITOK | M_ZERO);
552
553	for (i = 0; i < ncmds; i++) {
554		cm = &sc->mfi_commands[i];
555		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
556		    sc->mfi_cmd_size * i);
557		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
558		    sc->mfi_cmd_size * i;
559		cm->cm_frame->header.context = i;
560		cm->cm_sense = &sc->mfi_sense[i];
561		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
562		cm->cm_sc = sc;
563		cm->cm_index = i;
564		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
565		    &cm->cm_dmamap) == 0)
566			mfi_release_command(cm);
567		else
568			break;
569		sc->mfi_total_cmds++;
570	}
571
572	return (0);
573}
574
575void
576mfi_release_command(struct mfi_command *cm)
577{
578	struct mfi_frame_header *hdr;
579	uint32_t *hdr_data;
580
581	/*
582	 * Zero out the important fields of the frame, but make sure the
583	 * context field is preserved.  For efficiency, handle the fields
584	 * as 32 bit words.  Clear out the first S/G entry too for safety.
585	 */
586	hdr = &cm->cm_frame->header;
587	if (cm->cm_data != NULL && hdr->sg_count) {
588		cm->cm_sg->sg32[0].len = 0;
589		cm->cm_sg->sg32[0].addr = 0;
590	}
591
592	hdr_data = (uint32_t *)cm->cm_frame;
593	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
594	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
595	hdr_data[4] = 0;	/* flags, timeout */
596	hdr_data[5] = 0;	/* data_len */
597
598	cm->cm_extra_frames = 0;
599	cm->cm_flags = 0;
600	cm->cm_complete = NULL;
601	cm->cm_private = NULL;
602	cm->cm_data = NULL;
603	cm->cm_sg = NULL;
604	cm->cm_total_frame_size = 0;
605
606	mfi_enqueue_free(cm);
607}
608
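/*
 * Grab a free command and set it up as a DCMD frame for the given opcode.
 * If bufsize is non-zero, either reuse the caller-supplied buffer in *bufp
 * or allocate a new one and return it through *bufp.  The caller must hold
 * mfi_io_lock.
 */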
609static int
610mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
611    void **bufp, size_t bufsize)
612{
613	struct mfi_command *cm;
614	struct mfi_dcmd_frame *dcmd;
615	void *buf = NULL;
616
617	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
618
619	cm = mfi_dequeue_free(sc);
620	if (cm == NULL)
621		return (EBUSY);
622
623	if ((bufsize > 0) && (bufp != NULL)) {
624		if (*bufp == NULL) {
625			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
626			if (buf == NULL) {
627				mfi_release_command(cm);
628				return (ENOMEM);
629			}
630			*bufp = buf;
631		} else {
632			buf = *bufp;
633		}
634	}
635
636	dcmd = &cm->cm_frame->dcmd;
637	bzero(dcmd->mbox, MFI_MBOX_SIZE);
638	dcmd->header.cmd = MFI_CMD_DCMD;
639	dcmd->header.timeout = 0;
640	dcmd->header.flags = 0;
641	dcmd->header.data_len = bufsize;
642	dcmd->opcode = opcode;
643	cm->cm_sg = &dcmd->sgl;
644	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
645	cm->cm_flags = 0;
646	cm->cm_data = buf;
647	cm->cm_private = buf;
648	cm->cm_len = bufsize;
649
650	*cmp = cm;
651	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
652		*bufp = buf;
653	return (0);
654}
655
656static int
657mfi_comms_init(struct mfi_softc *sc)
658{
659	struct mfi_command *cm;
660	struct mfi_init_frame *init;
661	struct mfi_init_qinfo *qinfo;
662	int error;
663
664	mtx_lock(&sc->mfi_io_lock);
665	if ((cm = mfi_dequeue_free(sc)) == NULL) {
666		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}
667
668	/*
669	 * Abuse the SG list area of the frame to hold the init_qinfo
670	 * object.
671	 */
672	init = &cm->cm_frame->init;
673	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
674
675	bzero(qinfo, sizeof(struct mfi_init_qinfo));
676	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
677	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
678	    offsetof(struct mfi_hwcomms, hw_reply_q);
679	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
680	    offsetof(struct mfi_hwcomms, hw_pi);
681	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
682	    offsetof(struct mfi_hwcomms, hw_ci);
683
684	init->header.cmd = MFI_CMD_INIT;
685	init->header.data_len = sizeof(struct mfi_init_qinfo);
686	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
687	cm->cm_data = NULL;
688	cm->cm_flags = MFI_CMD_POLLED;
689
690	if ((error = mfi_mapcmd(sc, cm)) != 0) {
691		device_printf(sc->mfi_dev, "failed to send init command\n");
692		mtx_unlock(&sc->mfi_io_lock);
693		return (error);
694	}
695	mfi_release_command(cm);
696	mtx_unlock(&sc->mfi_io_lock);
697
698	return (0);
699}
700
701static int
702mfi_get_controller_info(struct mfi_softc *sc)
703{
704	struct mfi_command *cm = NULL;
705	struct mfi_ctrl_info *ci = NULL;
706	uint32_t max_sectors_1, max_sectors_2;
707	int error;
708
709	mtx_lock(&sc->mfi_io_lock);
710	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
711	    (void **)&ci, sizeof(*ci));
712	if (error)
713		goto out;
714	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
715
716	if ((error = mfi_mapcmd(sc, cm)) != 0) {
717		device_printf(sc->mfi_dev, "Failed to get controller info\n");
718		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
719		    MFI_SECTOR_LEN;
720		error = 0;
721		goto out;
722	}
723
724	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
725	    BUS_DMASYNC_POSTREAD);
726	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
727
728	max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
729	max_sectors_2 = ci->max_request_size;
730	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
731
732out:
733	if (ci)
734		free(ci, M_MFIBUF);
735	if (cm)
736		mfi_release_command(cm);
737	mtx_unlock(&sc->mfi_io_lock);
738	return (error);
739}
740
741static int
742mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
743{
744	struct mfi_command *cm = NULL;
745	int error;
746
747	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
748	    (void **)log_state, sizeof(**log_state));
749	if (error)
750		goto out;
751	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
752
753	if ((error = mfi_mapcmd(sc, cm)) != 0) {
754		device_printf(sc->mfi_dev, "Failed to get log state\n");
755		goto out;
756	}
757
758	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
759	    BUS_DMASYNC_POSTREAD);
760	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
761
762out:
763	if (cm)
764		mfi_release_command(cm);
765
766	return (error);
767}
768
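/*
 * Prime asynchronous event notification (AEN).  With no starting sequence
 * number, read the controller's event log state, replay entries from the
 * shutdown sequence number forward, and then register for new events at
 * the configured class and locale.
 */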
769static int
770mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
771{
772	struct mfi_evt_log_state *log_state = NULL;
773	union mfi_evt class_locale;
774	int error = 0;
775	uint32_t seq;
776
777	class_locale.members.reserved = 0;
778	class_locale.members.locale = mfi_event_locale;
779	class_locale.members.class  = mfi_event_class;
780
781	if (seq_start == 0) {
782		error = mfi_get_log_state(sc, &log_state);
783		if (error) {
784			if (log_state)
785				free(log_state, M_MFIBUF);
786			return (error);
787		}
788		/* The message log is a circular buffer */
789		for (seq = log_state->shutdown_seq_num;
790		     seq != log_state->newest_seq_num; seq++) {
791			mfi_get_entry(sc, seq);
792		}
793		mfi_get_entry(sc, seq);
794	} else
795		seq = seq_start;
796	mfi_aen_register(sc, seq, class_locale.word);
797	free(log_state, M_MFIBUF);
798
799	return 0;
800}
801
802static int
803mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
804{
805
806	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
807	cm->cm_complete = NULL;
808
809
810	/*
811	 * MegaCli can issue a DCMD of 0.  In this case do nothing
812	 * and return 0 to it as status
813	 */
814	if (cm->cm_frame->dcmd.opcode == 0) {
815		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
816		cm->cm_error = 0;
817		return (cm->cm_error);
818	}
819	mfi_enqueue_ready(cm);
820	mfi_startio(sc);
821	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
822		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
823	return (cm->cm_error);
824}
825
826void
827mfi_free(struct mfi_softc *sc)
828{
829	struct mfi_command *cm;
830	int i;
831
832	callout_drain(&sc->mfi_watchdog_callout);
833
834	if (sc->mfi_cdev != NULL)
835		destroy_dev(sc->mfi_cdev);
836
837	if (sc->mfi_total_cmds != 0) {
838		for (i = 0; i < sc->mfi_total_cmds; i++) {
839			cm = &sc->mfi_commands[i];
840			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
841		}
842		free(sc->mfi_commands, M_MFIBUF);
843	}
844
845	if (sc->mfi_intr)
846		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
847	if (sc->mfi_irq != NULL)
848		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
849		    sc->mfi_irq);
850
851	if (sc->mfi_sense_busaddr != 0)
852		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
853	if (sc->mfi_sense != NULL)
854		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
855		    sc->mfi_sense_dmamap);
856	if (sc->mfi_sense_dmat != NULL)
857		bus_dma_tag_destroy(sc->mfi_sense_dmat);
858
859	if (sc->mfi_frames_busaddr != 0)
860		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
861	if (sc->mfi_frames != NULL)
862		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
863		    sc->mfi_frames_dmamap);
864	if (sc->mfi_frames_dmat != NULL)
865		bus_dma_tag_destroy(sc->mfi_frames_dmat);
866
867	if (sc->mfi_comms_busaddr != 0)
868		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
869	if (sc->mfi_comms != NULL)
870		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
871		    sc->mfi_comms_dmamap);
872	if (sc->mfi_comms_dmat != NULL)
873		bus_dma_tag_destroy(sc->mfi_comms_dmat);
874
875	if (sc->mfi_buffer_dmat != NULL)
876		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
877	if (sc->mfi_parent_dmat != NULL)
878		bus_dma_tag_destroy(sc->mfi_parent_dmat);
879
880	if (mtx_initialized(&sc->mfi_io_lock)) {
881		mtx_destroy(&sc->mfi_io_lock);
882		sx_destroy(&sc->mfi_config_lock);
883	}
884
885	return;
886}
887
888static void
889mfi_startup(void *arg)
890{
891	struct mfi_softc *sc;
892
893	sc = (struct mfi_softc *)arg;
894
895	config_intrhook_disestablish(&sc->mfi_ich);
896
897	sc->mfi_enable_intr(sc);
898	sx_xlock(&sc->mfi_config_lock);
899	mtx_lock(&sc->mfi_io_lock);
900	mfi_ldprobe(sc);
901	mtx_unlock(&sc->mfi_io_lock);
902	sx_xunlock(&sc->mfi_config_lock);
903}
904
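/*
 * Interrupt handler: acknowledge and clear the interrupt, then walk the
 * reply queue from the consumer index to the producer index, completing
 * each command whose context was posted there, and finally unfreeze the
 * queue and restart any deferred I/O.
 */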
905static void
906mfi_intr(void *arg)
907{
908	struct mfi_softc *sc;
909	struct mfi_command *cm;
910	uint32_t pi, ci, context;
911
912	sc = (struct mfi_softc *)arg;
913
914	if (sc->mfi_check_clear_intr(sc))
915		return;
916
917	pi = sc->mfi_comms->hw_pi;
918	ci = sc->mfi_comms->hw_ci;
919	mtx_lock(&sc->mfi_io_lock);
920	while (ci != pi) {
921		context = sc->mfi_comms->hw_reply_q[ci];
922		if (context < sc->mfi_max_fw_cmds) {
923			cm = &sc->mfi_commands[context];
924			mfi_remove_busy(cm);
925			cm->cm_error = 0;
926			mfi_complete(sc, cm);
927		}
928		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
929			ci = 0;
930		}
931	}
932
933	sc->mfi_comms->hw_ci = ci;
934
935	/* Give deferred I/O a chance to run */
936	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
937		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
938	mfi_startio(sc);
939	mtx_unlock(&sc->mfi_io_lock);
940
941	return;
942}
943
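/*
 * Flush the controller with a polled MFI_DCMD_CTRL_SHUTDOWN; registered as
 * a shutdown_final eventhandler in mfi_attach().
 */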
944int
945mfi_shutdown(struct mfi_softc *sc)
946{
947	struct mfi_dcmd_frame *dcmd;
948	struct mfi_command *cm;
949	int error;
950
951	mtx_lock(&sc->mfi_io_lock);
952	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
953	if (error) {
954		mtx_unlock(&sc->mfi_io_lock);
955		return (error);
956	}
957
958	if (sc->mfi_aen_cm != NULL)
959		mfi_abort(sc, sc->mfi_aen_cm);
960
961	dcmd = &cm->cm_frame->dcmd;
962	dcmd->header.flags = MFI_FRAME_DIR_NONE;
963	cm->cm_flags = MFI_CMD_POLLED;
964	cm->cm_data = NULL;
965
966	if ((error = mfi_mapcmd(sc, cm)) != 0) {
967		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
968	}
969
970	mfi_release_command(cm);
971	mtx_unlock(&sc->mfi_io_lock);
972	return (error);
973}
974
975static void
976mfi_ldprobe(struct mfi_softc *sc)
977{
978	struct mfi_frame_header *hdr;
979	struct mfi_command *cm = NULL;
980	struct mfi_ld_list *list = NULL;
981	struct mfi_disk *ld;
982	int error, i;
983
984	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
985	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
986
987	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
988	    (void **)&list, sizeof(*list));
989	if (error)
990		goto out;
991
992	cm->cm_flags = MFI_CMD_DATAIN;
993	if (mfi_wait_command(sc, cm) != 0) {
994		device_printf(sc->mfi_dev, "Failed to get device listing\n");
995		goto out;
996	}
997
998	hdr = &cm->cm_frame->header;
999	if (hdr->cmd_status != MFI_STAT_OK) {
1000		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1001		    hdr->cmd_status);
1002		goto out;
1003	}
1004
1005	for (i = 0; i < list->ld_count; i++) {
1006		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1007			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1008				goto skip_add;
1009		}
1010		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1011	skip_add:;
1012	}
1013out:
1014	if (list)
1015		free(list, M_MFIBUF);
1016	if (cm)
1017		mfi_release_command(cm);
1018
1019	return;
1020}
1021
1022static void
1023mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1024{
1025	switch (detail->arg_type) {
1026	case MR_EVT_ARGS_NONE:
1027		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
1028		    detail->seq,
1029		    detail->time,
1030		    detail->class.members.locale,
1031		    detail->class.members.class,
1032		    detail->description
1033		    );
1034		break;
1035	case MR_EVT_ARGS_CDB_SENSE:
1036		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D "
1037		    "Sense %*D: %s\n",
1038		    detail->seq,
1039		    detail->time,
1040		    detail->class.members.locale,
1041		    detail->class.members.class,
1042		    detail->args.cdb_sense.pd.device_id,
1043		    detail->args.cdb_sense.pd.enclosure_index,
1044		    detail->args.cdb_sense.pd.slot_number,
1045		    detail->args.cdb_sense.cdb_len,
1046		    detail->args.cdb_sense.cdb,
1047		    ":",
1048		    detail->args.cdb_sense.sense_len,
1049		    detail->args.cdb_sense.sense,
1050		    ":",
1051		    detail->description
1052		    );
1053		break;
1054	case MR_EVT_ARGS_LD:
1055		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1056		    "event: %s\n",
1057		    detail->seq,
1058		    detail->time,
1059		    detail->class.members.locale,
1060		    detail->class.members.class,
1061		    detail->args.ld.ld_index,
1062		    detail->args.ld.target_id,
1063		    detail->description
1064		    );
1065		break;
1066	case MR_EVT_ARGS_LD_COUNT:
1067		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1068		    "count %lld: %s\n",
1069		    detail->seq,
1070		    detail->time,
1071		    detail->class.members.locale,
1072		    detail->class.members.class,
1073		    detail->args.ld_count.ld.ld_index,
1074		    detail->args.ld_count.ld.target_id,
1075		    (long long)detail->args.ld_count.count,
1076		    detail->description
1077		    );
1078		break;
1079	case MR_EVT_ARGS_LD_LBA:
1080		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1081		    "lba %lld: %s\n",
1082		    detail->seq,
1083		    detail->time,
1084		    detail->class.members.locale,
1085		    detail->class.members.class,
1086		    detail->args.ld_lba.ld.ld_index,
1087		    detail->args.ld_lba.ld.target_id,
1088		    (long long)detail->args.ld_lba.lba,
1089		    detail->description
1090		    );
1091		break;
1092	case MR_EVT_ARGS_LD_OWNER:
1093		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1094		    "owner changed: prior %d, new %d: %s\n",
1095		    detail->seq,
1096		    detail->time,
1097		    detail->class.members.locale,
1098		    detail->class.members.class,
1099		    detail->args.ld_owner.ld.ld_index,
1100		    detail->args.ld_owner.ld.target_id,
1101		    detail->args.ld_owner.pre_owner,
1102		    detail->args.ld_owner.new_owner,
1103		    detail->description
1104		    );
1105		break;
1106	case MR_EVT_ARGS_LD_LBA_PD_LBA:
1107		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1108		    "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
1109		    detail->seq,
1110		    detail->time,
1111		    detail->class.members.locale,
1112		    detail->class.members.class,
1113		    detail->args.ld_lba_pd_lba.ld.ld_index,
1114		    detail->args.ld_lba_pd_lba.ld.target_id,
1115		    (long long)detail->args.ld_lba_pd_lba.ld_lba,
1116		    detail->args.ld_lba_pd_lba.pd.device_id,
1117		    detail->args.ld_lba_pd_lba.pd.enclosure_index,
1118		    detail->args.ld_lba_pd_lba.pd.slot_number,
1119		    (long long)detail->args.ld_lba_pd_lba.pd_lba,
1120		    detail->description
1121		    );
1122		break;
1123	case MR_EVT_ARGS_LD_PROG:
1124		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1125		    "progress %d%% in %ds: %s\n",
1126		    detail->seq,
1127		    detail->time,
1128		    detail->class.members.locale,
1129		    detail->class.members.class,
1130		    detail->args.ld_prog.ld.ld_index,
1131		    detail->args.ld_prog.ld.target_id,
1132		    detail->args.ld_prog.prog.progress/655,
1133		    detail->args.ld_prog.prog.elapsed_seconds,
1134		    detail->description
1135		    );
1136		break;
1137	case MR_EVT_ARGS_LD_STATE:
1138		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1139		    "state prior %d new %d: %s\n",
1140		    detail->seq,
1141		    detail->time,
1142		    detail->class.members.locale,
1143		    detail->class.members.class,
1144		    detail->args.ld_state.ld.ld_index,
1145		    detail->args.ld_state.ld.target_id,
1146		    detail->args.ld_state.prev_state,
1147		    detail->args.ld_state.new_state,
1148		    detail->description
1149		    );
1150		break;
1151	case MR_EVT_ARGS_LD_STRIP:
1152		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1153		    "strip %lld: %s\n",
1154		    detail->seq,
1155		    detail->time,
1156		    detail->class.members.locale,
1157		    detail->class.members.class,
1158		    detail->args.ld_strip.ld.ld_index,
1159		    detail->args.ld_strip.ld.target_id,
1160		    (long long)detail->args.ld_strip.strip,
1161		    detail->description
1162		    );
1163		break;
1164	case MR_EVT_ARGS_PD:
1165		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1166		    "event: %s\n",
1167		    detail->seq,
1168		    detail->time,
1169		    detail->class.members.locale,
1170		    detail->class.members.class,
1171		    detail->args.pd.device_id,
1172		    detail->args.pd.enclosure_index,
1173		    detail->args.pd.slot_number,
1174		    detail->description
1175		    );
1176		break;
1177	case MR_EVT_ARGS_PD_ERR:
1178		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1179		    "err %d: %s\n",
1180		    detail->seq,
1181		    detail->time,
1182		    detail->class.members.locale,
1183		    detail->class.members.class,
1184		    detail->args.pd_err.pd.device_id,
1185		    detail->args.pd_err.pd.enclosure_index,
1186		    detail->args.pd_err.pd.slot_number,
1187		    detail->args.pd_err.err,
1188		    detail->description
1189		    );
1190		break;
1191	case MR_EVT_ARGS_PD_LBA:
1192		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1193		    "lba %lld: %s\n",
1194		    detail->seq,
1195		    detail->time,
1196		    detail->class.members.locale,
1197		    detail->class.members.class,
1198		    detail->args.pd_lba.pd.device_id,
1199		    detail->args.pd_lba.pd.enclosure_index,
1200		    detail->args.pd_lba.pd.slot_number,
1201		    (long long)detail->args.pd_lba.lba,
1202		    detail->description
1203		    );
1204		break;
1205	case MR_EVT_ARGS_PD_LBA_LD:
1206		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1207		    "lba %lld VD %02d/%d: %s\n",
1208		    detail->seq,
1209		    detail->time,
1210		    detail->class.members.locale,
1211		    detail->class.members.class,
1212		    detail->args.pd_lba_ld.pd.device_id,
1213		    detail->args.pd_lba_ld.pd.enclosure_index,
1214		    detail->args.pd_lba_ld.pd.slot_number,
1215		    (long long)detail->args.pd_lba.lba,
1216		    detail->args.pd_lba_ld.ld.ld_index,
1217		    detail->args.pd_lba_ld.ld.target_id,
1218		    detail->description
1219		    );
1220		break;
1221	case MR_EVT_ARGS_PD_PROG:
1222		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1223		    "progress %d%% seconds %ds: %s\n",
1224		    detail->seq,
1225		    detail->time,
1226		    detail->class.members.locale,
1227		    detail->class.members.class,
1228		    detail->args.pd_prog.pd.device_id,
1229		    detail->args.pd_prog.pd.enclosure_index,
1230		    detail->args.pd_prog.pd.slot_number,
1231		    detail->args.pd_prog.prog.progress/655,
1232		    detail->args.pd_prog.prog.elapsed_seconds,
1233		    detail->description
1234		    );
1235		break;
1236	case MR_EVT_ARGS_PD_STATE:
1237		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1238		    "state prior %d new %d: %s\n",
1239		    detail->seq,
1240		    detail->time,
1241		    detail->class.members.locale,
1242		    detail->class.members.class,
1243		    detail->args.pd_prog.pd.device_id,
1244		    detail->args.pd_prog.pd.enclosure_index,
1245		    detail->args.pd_prog.pd.slot_number,
1246		    detail->args.pd_state.prev_state,
1247		    detail->args.pd_state.new_state,
1248		    detail->description
1249		    );
1250		break;
1251	case MR_EVT_ARGS_PCI:
1252		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x%04x 0x%04x "
1253		    "0x%04x 0x%04x: %s\n",
1254		    detail->seq,
1255		    detail->time,
1256		    detail->class.members.locale,
1257		    detail->class.members.class,
1258		    detail->args.pci.venderId,
1259		    detail->args.pci.deviceId,
1260		    detail->args.pci.subVenderId,
1261		    detail->args.pci.subDeviceId,
1262		    detail->description
1263		    );
1264		break;
1265	case MR_EVT_ARGS_RATE:
1266		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1267		    detail->seq,
1268		    detail->time,
1269		    detail->class.members.locale,
1270		    detail->class.members.class,
1271		    detail->args.rate,
1272		    detail->description
1273		    );
1274		break;
1275	case MR_EVT_ARGS_TIME:
1276		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1277		    "elapsed %ds: %s\n",
1278		    detail->seq,
1279		    detail->time,
1280		    detail->class.members.locale,
1281		    detail->class.members.class,
1282		    detail->args.time.rtc,
1283		    detail->args.time.elapsedSeconds,
1284		    detail->description
1285		    );
1286		break;
1287	case MR_EVT_ARGS_ECC:
1288		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1289		    detail->seq,
1290		    detail->time,
1291		    detail->class.members.locale,
1292		    detail->class.members.class,
1293		    detail->args.ecc.ecar,
1294		    detail->args.ecc.elog,
1295		    detail->args.ecc.str,
1296		    detail->description
1297		    );
1298		break;
1299	default:
1300		device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1301		    detail->seq,
1302		    detail->time,
1303		    detail->class.members.locale,
1304		    detail->class.members.class,
1305		    detail->arg_type, detail->description
1306		    );
1307	}
1308}
1309
1310static int
1311mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1312{
1313	struct mfi_command *cm;
1314	struct mfi_dcmd_frame *dcmd;
1315	union mfi_evt current_aen, prior_aen;
1316	struct mfi_evt_detail *ed = NULL;
1317	int error = 0;
1318
1319	current_aen.word = locale;
1320	if (sc->mfi_aen_cm != NULL) {
1321		prior_aen.word =
1322		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1323		if (prior_aen.members.class <= current_aen.members.class &&
1324		    !((prior_aen.members.locale & current_aen.members.locale)
1325		    ^current_aen.members.locale)) {
1326			return (0);
1327		} else {
1328			prior_aen.members.locale |= current_aen.members.locale;
1329			if (prior_aen.members.class
1330			    < current_aen.members.class)
1331				current_aen.members.class =
1332				    prior_aen.members.class;
1333			mfi_abort(sc, sc->mfi_aen_cm);
1334		}
1335	}
1336
1337	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1338	    (void **)&ed, sizeof(*ed));
1339	if (error) {
1340		goto out;
1341	}
1342
1343	dcmd = &cm->cm_frame->dcmd;
1344	((uint32_t *)&dcmd->mbox)[0] = seq;
1345	((uint32_t *)&dcmd->mbox)[1] = locale;
1346	cm->cm_flags = MFI_CMD_DATAIN;
1347	cm->cm_complete = mfi_aen_complete;
1348
1349	sc->mfi_aen_cm = cm;
1350
1351	mfi_enqueue_ready(cm);
1352	mfi_startio(sc);
1353
1354out:
1355	return (error);
1356}
1357
1358static void
1359mfi_aen_complete(struct mfi_command *cm)
1360{
1361	struct mfi_frame_header *hdr;
1362	struct mfi_softc *sc;
1363	struct mfi_evt_detail *detail;
1364	struct mfi_aen *mfi_aen_entry, *tmp;
1365	int seq = 0, aborted = 0;
1366
1367	sc = cm->cm_sc;
1368	hdr = &cm->cm_frame->header;
1369
1370	if (sc->mfi_aen_cm == NULL)
1371		return;
1372
1373	if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1374		sc->mfi_aen_cm->cm_aen_abort = 0;
1375		aborted = 1;
1376	} else {
1377		sc->mfi_aen_triggered = 1;
1378		if (sc->mfi_poll_waiting) {
1379			sc->mfi_poll_waiting = 0;
1380			selwakeup(&sc->mfi_select);
1381		}
1382		detail = cm->cm_data;
1383		/*
1384		 * XXX If this function is too expensive or is recursive, then
1385		 * events should be put onto a queue and processed later.
1386		 */
1387		mtx_unlock(&sc->mfi_io_lock);
1388		mfi_decode_evt(sc, detail);
1389		mtx_lock(&sc->mfi_io_lock);
1390		seq = detail->seq + 1;
1391		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1392			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1393			    aen_link);
1394			PROC_LOCK(mfi_aen_entry->p);
1395			psignal(mfi_aen_entry->p, SIGIO);
1396			PROC_UNLOCK(mfi_aen_entry->p);
1397			free(mfi_aen_entry, M_MFIBUF);
1398		}
1399	}
1400
1401	free(cm->cm_data, M_MFIBUF);
1402	sc->mfi_aen_cm = NULL;
1403	wakeup(&sc->mfi_aen_cm);
1404	mfi_release_command(cm);
1405
1406	/* set it up again so the driver can catch more events */
1407	if (!aborted) {
1408		mfi_aen_setup(sc, seq);
1409	}
1410}
1411
1412/* Only do one event for now so we can easily iterate through them */
1413#define MAX_EVENTS 1
1414static int
1415mfi_get_entry(struct mfi_softc *sc, int seq)
1416{
1417	struct mfi_command *cm;
1418	struct mfi_dcmd_frame *dcmd;
1419	struct mfi_evt_list *el;
1420	int error;
1421	int i;
1422	int size;
1423
1424	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1425		return (EBUSY);
1426	}
1427
1428	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1429		* (MAX_EVENTS - 1);
1430	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1431	if (el == NULL) {
1432		mfi_release_command(cm);
1433		return (ENOMEM);
1434	}
1435
1436	dcmd = &cm->cm_frame->dcmd;
1437	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1438	dcmd->header.cmd = MFI_CMD_DCMD;
1439	dcmd->header.timeout = 0;
1440	dcmd->header.data_len = size;
1441	dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1442	((uint32_t *)&dcmd->mbox)[0] = seq;
1443	((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1444	cm->cm_sg = &dcmd->sgl;
1445	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1446	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1447	cm->cm_data = el;
1448	cm->cm_len = size;
1449
1450	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1451		device_printf(sc->mfi_dev, "Failed to get controller entry\n");
1452		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1453		    MFI_SECTOR_LEN;
1454		free(el, M_MFIBUF);
1455		mfi_release_command(cm);
1456		return (0);
1457	}
1458
1459	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1460	    BUS_DMASYNC_POSTREAD);
1461	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1462
1463	if (dcmd->header.cmd_status != MFI_STAT_NOT_FOUND) {
1464		for (i = 0; i < el->count; i++) {
1465			if (seq + i == el->event[i].seq)
1466				mfi_decode_evt(sc, &el->event[i]);
1467		}
1468	}
1469
1470	free(cm->cm_data, M_MFIBUF);
1471	mfi_release_command(cm);
1472	return (0);
1473}
1474
1475static int
1476mfi_add_ld(struct mfi_softc *sc, int id)
1477{
1478	struct mfi_command *cm;
1479	struct mfi_dcmd_frame *dcmd = NULL;
1480	struct mfi_ld_info *ld_info = NULL;
1481	int error;
1482
1483	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1484
1485	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1486	    (void **)&ld_info, sizeof(*ld_info));
1487	if (error) {
1488		device_printf(sc->mfi_dev,
1489		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1490		if (ld_info)
1491			free(ld_info, M_MFIBUF);
1492		return (error);
1493	}
1494	cm->cm_flags = MFI_CMD_DATAIN;
1495	dcmd = &cm->cm_frame->dcmd;
1496	dcmd->mbox[0] = id;
1497	if (mfi_wait_command(sc, cm) != 0) {
1498		device_printf(sc->mfi_dev,
1499		    "Failed to get logical drive: %d\n", id);
1500		free(ld_info, M_MFIBUF);
1501		return (0);
1502	}
1503
1504	mfi_add_ld_complete(cm);
1505	return (0);
1506}
1507
1508static void
1509mfi_add_ld_complete(struct mfi_command *cm)
1510{
1511	struct mfi_frame_header *hdr;
1512	struct mfi_ld_info *ld_info;
1513	struct mfi_softc *sc;
1514	device_t child;
1515
1516	sc = cm->cm_sc;
1517	hdr = &cm->cm_frame->header;
1518	ld_info = cm->cm_private;
1519
1520	if (hdr->cmd_status != MFI_STAT_OK) {
1521		free(ld_info, M_MFIBUF);
1522		mfi_release_command(cm);
1523		return;
1524	}
1525	mfi_release_command(cm);
1526
1527	mtx_unlock(&sc->mfi_io_lock);
1528	mtx_lock(&Giant);
1529	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1530		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1531		free(ld_info, M_MFIBUF);
1532		mtx_unlock(&Giant);
1533		mtx_lock(&sc->mfi_io_lock);
1534		return;
1535	}
1536
1537	device_set_ivars(child, ld_info);
1538	device_set_desc(child, "MFI Logical Disk");
1539	bus_generic_attach(sc->mfi_dev);
1540	mtx_unlock(&Giant);
1541	mtx_lock(&sc->mfi_io_lock);
1542}
1543
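/*
 * Turn the next queued bio into an MFI LD read/write frame.  The data
 * buffer is mapped later by mfi_mapcmd()/mfi_data_cb(), which fill in the
 * scatter/gather list behind the I/O header.
 */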
1544static struct mfi_command *
1545mfi_bio_command(struct mfi_softc *sc)
1546{
1547	struct mfi_io_frame *io;
1548	struct mfi_command *cm;
1549	struct bio *bio;
1550	int flags, blkcount;
1551
1552	if ((cm = mfi_dequeue_free(sc)) == NULL)
1553		return (NULL);
1554
1555	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1556		mfi_release_command(cm);
1557		return (NULL);
1558	}
1559
1560	io = &cm->cm_frame->io;
1561	switch (bio->bio_cmd & 0x03) {
1562	case BIO_READ:
1563		io->header.cmd = MFI_CMD_LD_READ;
1564		flags = MFI_CMD_DATAIN;
1565		break;
1566	case BIO_WRITE:
1567		io->header.cmd = MFI_CMD_LD_WRITE;
1568		flags = MFI_CMD_DATAOUT;
1569		break;
1570	default:
1571		panic("Invalid bio command");
1572	}
1573
1574	/* Cheat with the sector length to avoid a non-constant division */
1575	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1576	io->header.target_id = (uintptr_t)bio->bio_driver1;
1577	io->header.timeout = 0;
1578	io->header.flags = 0;
1579	io->header.sense_len = MFI_SENSE_LEN;
1580	io->header.data_len = blkcount;
1581	io->sense_addr_lo = cm->cm_sense_busaddr;
1582	io->sense_addr_hi = 0;
1583	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1584	io->lba_lo = bio->bio_pblkno & 0xffffffff;
1585	cm->cm_complete = mfi_bio_complete;
1586	cm->cm_private = bio;
1587	cm->cm_data = bio->bio_data;
1588	cm->cm_len = bio->bio_bcount;
1589	cm->cm_sg = &io->sgl;
1590	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1591	cm->cm_flags = flags;
1592	return (cm);
1593}
1594
1595static void
1596mfi_bio_complete(struct mfi_command *cm)
1597{
1598	struct bio *bio;
1599	struct mfi_frame_header *hdr;
1600	struct mfi_softc *sc;
1601
1602	bio = cm->cm_private;
1603	hdr = &cm->cm_frame->header;
1604	sc = cm->cm_sc;
1605
1606	if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1607		bio->bio_flags |= BIO_ERROR;
1608		bio->bio_error = EIO;
1609		device_printf(sc->mfi_dev, "I/O error, status= %d "
1610		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1611		mfi_print_sense(cm->cm_sc, cm->cm_sense);
1612	}
1613
1614	mfi_release_command(cm);
1615	mfi_disk_complete(bio);
1616}
1617
1618void
1619mfi_startio(struct mfi_softc *sc)
1620{
1621	struct mfi_command *cm;
1622	struct ccb_hdr *ccbh;
1623
1624	for (;;) {
1625		/* Don't bother if we're short on resources */
1626		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1627			break;
1628
1629		/* Try a command that has already been prepared */
1630		cm = mfi_dequeue_ready(sc);
1631
1632		if (cm == NULL) {
1633			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
1634				cm = sc->mfi_cam_start(ccbh);
1635		}
1636
1637		/* Nope, so look for work on the bioq */
1638		if (cm == NULL)
1639			cm = mfi_bio_command(sc);
1640
1641		/* No work available, so exit */
1642		if (cm == NULL)
1643			break;
1644
1645		/* Send the command to the controller */
1646		if (mfi_mapcmd(sc, cm) != 0) {
1647			mfi_requeue_ready(cm);
1648			break;
1649		}
1650	}
1651}
1652
1653static int
1654mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1655{
1656	int error, polled;
1657
1658	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1659
1660	if (cm->cm_data != NULL) {
1661		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1662		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1663		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1664		if (error == EINPROGRESS) {
1665			sc->mfi_flags |= MFI_FLAGS_QFRZN;
1666			return (0);
1667		}
1668	} else {
1669		error = mfi_send_frame(sc, cm);
1670	}
1671
1672	return (error);
1673}
1674
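/*
 * busdma callback for data buffers: copy the DMA segments into the frame's
 * 32-bit or 64-bit scatter/gather list, sync the buffer for the transfer
 * direction, account for the extra frames consumed by the S/G entries, and
 * send the frame to the controller.
 */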
1675static void
1676mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1677{
1678	struct mfi_frame_header *hdr;
1679	struct mfi_command *cm;
1680	union mfi_sgl *sgl;
1681	struct mfi_softc *sc;
1682	int i, dir;
1683
1684	cm = (struct mfi_command *)arg;
1685	sc = cm->cm_sc;
1686	hdr = &cm->cm_frame->header;
1687	sgl = cm->cm_sg;
1688
1689	if (error) {
1690		device_printf(sc->mfi_dev, "error %d in callback\n", error);
1691		cm->cm_error = error;
1692		mfi_complete(sc, cm);
1693		return;
1694	}
1695
1696	if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1697		for (i = 0; i < nsegs; i++) {
1698			sgl->sg32[i].addr = segs[i].ds_addr;
1699			sgl->sg32[i].len = segs[i].ds_len;
1700		}
1701	} else {
1702		for (i = 0; i < nsegs; i++) {
1703			sgl->sg64[i].addr = segs[i].ds_addr;
1704			sgl->sg64[i].len = segs[i].ds_len;
1705		}
1706		hdr->flags |= MFI_FRAME_SGL64;
1707	}
1708	hdr->sg_count = nsegs;
1709
1710	dir = 0;
1711	if (cm->cm_flags & MFI_CMD_DATAIN) {
1712		dir |= BUS_DMASYNC_PREREAD;
1713		hdr->flags |= MFI_FRAME_DIR_READ;
1714	}
1715	if (cm->cm_flags & MFI_CMD_DATAOUT) {
1716		dir |= BUS_DMASYNC_PREWRITE;
1717		hdr->flags |= MFI_FRAME_DIR_WRITE;
1718	}
1719	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1720	cm->cm_flags |= MFI_CMD_MAPPED;
1721
1722	/*
1723	 * Instead of calculating the total number of frames in the
1724	 * compound frame, it's already assumed that there will be at
1725	 * least 1 frame, so don't compensate for the modulo of the
1726	 * following division.
1727	 */
1728	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
1729	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1730
1731	mfi_send_frame(sc, cm);
1732
1733	return;
1734}
1735
1736static int
1737mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1738{
1739	struct mfi_frame_header *hdr;
1740	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1741
1742	hdr = &cm->cm_frame->header;
1743
1744	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1745		cm->cm_timestamp = time_uptime;
1746		mfi_enqueue_busy(cm);
1747	} else {
1748		hdr->cmd_status = 0xff;
1749		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1750	}
1751
1752	/*
1753	 * The bus address of the command is aligned on a 64 byte boundary,
1754	 * leaving the least 6 bits as zero.  For whatever reason, the
1755	 * hardware wants the address shifted right by three, leaving just
1756	 * 3 zero bits.  These three bits are then used as a prefetching
1757	 * hint for the hardware to predict how many frames need to be
1758	 * fetched across the bus.  If a command has more than 8 frames
1759	 * then the 3 bits are set to 0x7 and the firmware uses other
1760	 * information in the command to determine the total amount to fetch.
1761	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1762	 * is enough for both 32bit and 64bit systems.
1763	 */
1764	if (cm->cm_extra_frames > 7)
1765		cm->cm_extra_frames = 7;
1766
1767	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
1768
1769	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1770		return (0);
1771
1772	/* This is a polled command, so busy-wait for it to complete. */
1773	while (hdr->cmd_status == 0xff) {
1774		DELAY(1000);
1775		tm -= 1;
1776		if (tm <= 0)
1777			break;
1778	}
1779
1780	if (hdr->cmd_status == 0xff) {
1781		device_printf(sc->mfi_dev, "Frame %p timed out "
1782			      "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1783		return (ETIMEDOUT);
1784	}
1785
1786	return (0);
1787}
1788
1789static void
1790mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1791{
1792	int dir;
1793
1794	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1795		dir = 0;
1796		if (cm->cm_flags & MFI_CMD_DATAIN)
1797			dir |= BUS_DMASYNC_POSTREAD;
1798		if (cm->cm_flags & MFI_CMD_DATAOUT)
1799			dir |= BUS_DMASYNC_POSTWRITE;
1800
1801		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1802		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1803		cm->cm_flags &= ~MFI_CMD_MAPPED;
1804	}
1805
1806	cm->cm_flags |= MFI_CMD_COMPLETED;
1807
1808	if (cm->cm_complete != NULL)
1809		cm->cm_complete(cm);
1810	else
1811		wakeup(cm);
1812}
1813
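/*
 * Ask the firmware to abort an outstanding command.  In practice this is
 * only used against the long-running AEN command, so mark it aborted and
 * then wait a bounded amount of time for mfi_aen_cm to be cleared.
 */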
1814static int
1815mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1816{
1817	struct mfi_command *cm;
1818	struct mfi_abort_frame *abort;
1819	int i = 0;
1820
1821	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1822
1823	if ((cm = mfi_dequeue_free(sc)) == NULL) {
1824		return (EBUSY);
1825	}
1826
1827	abort = &cm->cm_frame->abort;
1828	abort->header.cmd = MFI_CMD_ABORT;
1829	abort->header.flags = 0;
1830	abort->abort_context = cm_abort->cm_frame->header.context;
1831	abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1832	abort->abort_mfi_addr_hi = 0;
1833	cm->cm_data = NULL;
1834	cm->cm_flags = MFI_CMD_POLLED;
1835
1836	sc->mfi_aen_cm->cm_aen_abort = 1;
1837	mfi_mapcmd(sc, cm);
1838	mfi_release_command(cm);
1839
1840	while (i < 5 && sc->mfi_aen_cm != NULL) {
1841		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort", 5 * hz);
1842		i++;
1843	}
1844
1845	return (0);
1846}
1847
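/*
 * Write 'len' bytes from 'virt' to logical disk 'id' starting at sector
 * 'lba', using a single polled LD_WRITE frame.  Because it polls instead of
 * relying on interrupts, this is usable late in a crash, e.g. from the
 * kernel dump path.
 */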
1848int
1849mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1850{
1851	struct mfi_command *cm;
1852	struct mfi_io_frame *io;
1853	int error;
1854
1855	if ((cm = mfi_dequeue_free(sc)) == NULL)
1856		return (EBUSY);
1857
1858	io = &cm->cm_frame->io;
1859	io->header.cmd = MFI_CMD_LD_WRITE;
1860	io->header.target_id = id;
1861	io->header.timeout = 0;
1862	io->header.flags = 0;
1863	io->header.sense_len = MFI_SENSE_LEN;
1864	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1865	io->sense_addr_lo = cm->cm_sense_busaddr;
1866	io->sense_addr_hi = 0;
1867	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1868	io->lba_lo = lba & 0xffffffff;
1869	cm->cm_data = virt;
1870	cm->cm_len = len;
1871	cm->cm_sg = &io->sgl;
1872	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1873	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1874
1875	error = mfi_mapcmd(sc, cm);
1876	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1877	    BUS_DMASYNC_POSTWRITE);
1878	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1879	mfi_release_command(cm);
1880
1881	return (error);
1882}
1883
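/*
 * Control device open/close.  Opens are refused once a detach is in
 * progress; close also discards any AEN registrations belonging to the
 * closing process.
 */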
1884static int
1885mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1886{
1887	struct mfi_softc *sc;
1888	int error;
1889
1890	sc = dev->si_drv1;
1891
1892	mtx_lock(&sc->mfi_io_lock);
1893	if (sc->mfi_detaching)
1894		error = ENXIO;
1895	else {
1896		sc->mfi_flags |= MFI_FLAGS_OPEN;
1897		error = 0;
1898	}
1899	mtx_unlock(&sc->mfi_io_lock);
1900
1901	return (error);
1902}
1903
1904static int
1905mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1906{
1907	struct mfi_softc *sc;
1908	struct mfi_aen *mfi_aen_entry, *tmp;
1909
1910	sc = dev->si_drv1;
1911
1912	mtx_lock(&sc->mfi_io_lock);
1913	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1914
1915	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
1916		if (mfi_aen_entry->p == curproc) {
1917			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1918			    aen_link);
1919			free(mfi_aen_entry, M_MFIBUF);
1920		}
1921	}
1922	mtx_unlock(&sc->mfi_io_lock);
1923	return (0);
1924}
1925
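/*
 * DCMDs that change the array configuration (volume delete, config add or
 * clear) are serialized with an exclusive sx lock.  The return value tells
 * mfi_config_unlock() whether the lock was actually taken.
 */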
1926static int
1927mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
1928{
1929
1930	switch (opcode) {
1931	case MFI_DCMD_LD_DELETE:
1932	case MFI_DCMD_CFG_ADD:
1933	case MFI_DCMD_CFG_CLEAR:
1934		sx_xlock(&sc->mfi_config_lock);
1935		return (1);
1936	default:
1937		return (0);
1938	}
1939}
1940
1941static void
1942mfi_config_unlock(struct mfi_softc *sc, int locked)
1943{
1944
1945	if (locked)
1946		sx_xunlock(&sc->mfi_config_lock);
1947}
1948
1949/* Perform pre-issue checks on commands from userland and possibly veto them. */
1950static int
1951mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
1952{
1953	struct mfi_disk *ld, *ld2;
1954	int error;
1955
1956	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1957	error = 0;
1958	switch (cm->cm_frame->dcmd.opcode) {
1959	case MFI_DCMD_LD_DELETE:
1960		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1961			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1962				break;
1963		}
1964		if (ld == NULL)
1965			error = ENOENT;
1966		else
1967			error = mfi_disk_disable(ld);
1968		break;
1969	case MFI_DCMD_CFG_CLEAR:
1970		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1971			error = mfi_disk_disable(ld);
1972			if (error)
1973				break;
1974		}
1975		if (error) {
1976			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
1977				if (ld2 == ld)
1978					break;
1979				mfi_disk_enable(ld2);
1980			}
1981		}
1982		break;
1983	default:
1984		break;
1985	}
1986	return (error);
1987}
1988
1989/* Perform post-issue checks on commands from userland. */
1990static void
1991mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
1992{
1993	struct mfi_disk *ld, *ldn;
1994
1995	switch (cm->cm_frame->dcmd.opcode) {
1996	case MFI_DCMD_LD_DELETE:
1997		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1998			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
1999				break;
2000		}
2001		KASSERT(ld != NULL, ("volume disappeared"));
2002		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2003			mtx_unlock(&sc->mfi_io_lock);
2004			mtx_lock(&Giant);
2005			device_delete_child(sc->mfi_dev, ld->ld_dev);
2006			mtx_unlock(&Giant);
2007			mtx_lock(&sc->mfi_io_lock);
2008		} else
2009			mfi_disk_enable(ld);
2010		break;
2011	case MFI_DCMD_CFG_CLEAR:
2012		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2013			mtx_unlock(&sc->mfi_io_lock);
2014			mtx_lock(&Giant);
2015			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2016				device_delete_child(sc->mfi_dev, ld->ld_dev);
2017			}
2018			mtx_unlock(&Giant);
2019			mtx_lock(&sc->mfi_io_lock);
2020		} else {
2021			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2022				mfi_disk_enable(ld);
2023		}
2024		break;
2025	case MFI_DCMD_CFG_ADD:
2026		mfi_ldprobe(sc);
2027		break;
2028	}
2029}
2030
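/*
 * Handle an MFIIO_PASSTHRU request from userland: copy in the caller's data
 * buffer (if any), build a DCMD frame from the supplied ioc_frame, run it
 * through the usual pre/post configuration checks, and copy the frame and
 * data back out on completion.
 */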
2031static int
2032mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2033{
2034	struct mfi_command *cm;
2035	struct mfi_dcmd_frame *dcmd;
2036	void *ioc_buf = NULL;
2037	uint32_t context;
2038	int error = 0, locked;
2039
2040
2041	if (ioc->buf_size > 0) {
2042		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2043		if (ioc_buf == NULL) {
2044			return (ENOMEM);
2045		}
2046		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2047		if (error) {
2048			device_printf(sc->mfi_dev, "failed to copyin\n");
2049			free(ioc_buf, M_MFIBUF);
2050			return (error);
2051		}
2052	}
2053
2054	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2055
2056	mtx_lock(&sc->mfi_io_lock);
2057	while ((cm = mfi_dequeue_free(sc)) == NULL)
2058		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2059
2060	/* Save context for later */
2061	context = cm->cm_frame->header.context;
2062
2063	dcmd = &cm->cm_frame->dcmd;
2064	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2065
2066	cm->cm_sg = &dcmd->sgl;
2067	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2068	cm->cm_data = ioc_buf;
2069	cm->cm_len = ioc->buf_size;
2070
2071	/* restore context */
2072	cm->cm_frame->header.context = context;
2073
2074	/* Cheat since we don't know if we're writing or reading */
2075	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2076
2077	error = mfi_check_command_pre(sc, cm);
2078	if (error)
2079		goto out;
2080
2081	error = mfi_wait_command(sc, cm);
2082	if (error) {
2083		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2084		goto out;
2085	}
2086	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2087	mfi_check_command_post(sc, cm);
2088out:
2089	mfi_release_command(cm);
2090	mtx_unlock(&sc->mfi_io_lock);
2091	mfi_config_unlock(sc, locked);
2092	if (ioc->buf_size > 0)
2093		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2094	if (ioc_buf)
2095		free(ioc_buf, M_MFIBUF);
2096	return (error);
2097}
2098
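/*
 * PTRIN() converts a pointer field from the 32-bit compat ioctl structures
 * into a native kernel pointer; on 32-bit platforms it is a no-op.
 */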
2099#ifdef __amd64__
2100#define	PTRIN(p)		((void *)(uintptr_t)(p))
2101#else
2102#define	PTRIN(p)		(p)
2103#endif
2104
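/*
 * ioctl handler for the native control device.  Supports queue statistics,
 * logical disk queries, raw frame pass-through (MFI_CMD), AEN registration,
 * and shims that forward the Linux-style ioctls to mfi_linux_ioctl_int().
 */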
2105static int
2106mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
2107{
2108	struct mfi_softc *sc;
2109	union mfi_statrequest *ms;
2110	struct mfi_ioc_packet *ioc;
2111	struct mfi_ioc_aen *aen;
2112	struct mfi_command *cm = NULL;
2113	uint32_t context;
2114	uint8_t *sense_ptr;
2115	uint8_t *data = NULL, *temp;
2116	int i;
2117	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2118#ifdef __amd64__
2119	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2120	struct mfi_ioc_passthru iop_swab;
2121#endif
2122	int error, locked;
2123
2124	sc = dev->si_drv1;
2125	error = 0;
2126
2127	switch (cmd) {
2128	case MFIIO_STATS:
2129		ms = (union mfi_statrequest *)arg;
2130		switch (ms->ms_item) {
2131		case MFIQ_FREE:
2132		case MFIQ_BIO:
2133		case MFIQ_READY:
2134		case MFIQ_BUSY:
2135			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2136			    sizeof(struct mfi_qstat));
2137			break;
2138		default:
2139			error = ENOIOCTL;
2140			break;
2141		}
2142		break;
2143	case MFIIO_QUERY_DISK:
2144	{
2145		struct mfi_query_disk *qd;
2146		struct mfi_disk *ld;
2147
2148		qd = (struct mfi_query_disk *)arg;
2149		mtx_lock(&sc->mfi_io_lock);
2150		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2151			if (ld->ld_id == qd->array_id)
2152				break;
2153		}
2154		if (ld == NULL) {
2155			qd->present = 0;
2156			mtx_unlock(&sc->mfi_io_lock);
2157			return (0);
2158		}
2159		qd->present = 1;
2160		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2161			qd->open = 1;
2162		bzero(qd->devname, SPECNAMELEN + 1);
2163		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2164		mtx_unlock(&sc->mfi_io_lock);
2165		break;
2166	}
2167	case MFI_CMD:
2168		{
2169		devclass_t devclass;
2170		ioc = (struct mfi_ioc_packet *)arg;
2171		int adapter;
2172
2173		adapter = ioc->mfi_adapter_no;
2174		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2175			devclass = devclass_find("mfi");
2176			sc = devclass_get_softc(devclass, adapter);
2177		}
2178		mtx_lock(&sc->mfi_io_lock);
2179		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2180			mtx_unlock(&sc->mfi_io_lock);
2181			return (EBUSY);
2182		}
2183		mtx_unlock(&sc->mfi_io_lock);
2184		locked = 0;
2185
2186		/*
2187		 * save off original context since copying from user
2188		 * will clobber some data
2189		 */
2190		context = cm->cm_frame->header.context;
2191
2192		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2193		      2 * MFI_DCMD_FRAME_SIZE);  /* this isn't quite right */
2194		cm->cm_total_frame_size = (sizeof(union mfi_sgl) * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2195		if (ioc->mfi_sge_count) {
2196			cm->cm_sg =
2197			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2198		}
2199		cm->cm_flags = 0;
2200		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2201			cm->cm_flags |= MFI_CMD_DATAIN;
2202		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2203			cm->cm_flags |= MFI_CMD_DATAOUT;
2204		/* Legacy app shim */
2205		if (cm->cm_flags == 0)
2206			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2207		cm->cm_len = cm->cm_frame->header.data_len;
2208		if (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT)) {
2209			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2210			    M_WAITOK | M_ZERO);
2211			if (cm->cm_data == NULL) {
2212				device_printf(sc->mfi_dev, "Malloc failed\n");
2213				goto out;
2214			}
2215		} else {
2216			cm->cm_data = 0;
2217		}
2218
2219		/* restore header context */
2220		cm->cm_frame->header.context = context;
2221
2222		temp = data;
2223		if (cm->cm_flags & MFI_CMD_DATAOUT) {
2224			for (i = 0; i < ioc->mfi_sge_count; i++) {
2225				error = copyin(ioc->mfi_sgl[i].iov_base,
2226				       temp,
2227				       ioc->mfi_sgl[i].iov_len);
2228				if (error != 0) {
2229					device_printf(sc->mfi_dev,
2230					    "Copy in failed\n");
2231					goto out;
2232				}
2233				temp = &temp[ioc->mfi_sgl[i].iov_len];
2234			}
2235		}
2236
2237		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2238			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2239
2240		mtx_lock(&sc->mfi_io_lock);
2241		error = mfi_check_command_pre(sc, cm);
2242		if (error) {
2243			mtx_unlock(&sc->mfi_io_lock);
2244			goto out;
2245		}
2246
2247		if ((error = mfi_wait_command(sc, cm)) != 0) {
2248			device_printf(sc->mfi_dev,
2249			    "Controller command failed\n");
2250			mtx_unlock(&sc->mfi_io_lock);
2251			goto out;
2252		}
2253
2254		mfi_check_command_post(sc, cm);
2255		mtx_unlock(&sc->mfi_io_lock);
2256
2257		temp = data;
2258		if (cm->cm_flags & MFI_CMD_DATAIN) {
2259			for (i = 0; i < ioc->mfi_sge_count; i++) {
2260				error = copyout(temp,
2261					ioc->mfi_sgl[i].iov_base,
2262					ioc->mfi_sgl[i].iov_len);
2263				if (error != 0) {
2264					device_printf(sc->mfi_dev,
2265					    "Copy out failed\n");
2266					goto out;
2267				}
2268				temp = &temp[ioc->mfi_sgl[i].iov_len];
2269			}
2270		}
2271
2272		if (ioc->mfi_sense_len) {
2273			/* copy out sense */
2274			sense_ptr = &((struct mfi_ioc_packet*)arg)
2275			    ->mfi_frame.raw[0];
2276			error = copyout(cm->cm_sense, sense_ptr,
2277			    ioc->mfi_sense_len);
2278			if (error != 0) {
2279				device_printf(sc->mfi_dev,
2280				    "Copy out failed\n");
2281				goto out;
2282			}
2283		}
2284
2285		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
2286out:
2287		mfi_config_unlock(sc, locked);
2288		if (data)
2289			free(data, M_MFIBUF);
2290		if (cm) {
2291			mtx_lock(&sc->mfi_io_lock);
2292			mfi_release_command(cm);
2293			mtx_unlock(&sc->mfi_io_lock);
2294		}
2295
2296		break;
2297		}
2298	case MFI_SET_AEN:
2299		aen = (struct mfi_ioc_aen *)arg;
2300		error = mfi_aen_register(sc, aen->aen_seq_num,
2301		    aen->aen_class_locale);
2302
2303		break;
2304	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2305		{
2306			devclass_t devclass;
2307			struct mfi_linux_ioc_packet l_ioc;
2308			int adapter;
2309
2310			devclass = devclass_find("mfi");
2311			if (devclass == NULL)
2312				return (ENOENT);
2313
2314			error = copyin(arg, &l_ioc, sizeof(l_ioc));
2315			if (error)
2316				return (error);
2317			adapter = l_ioc.lioc_adapter_no;
2318			sc = devclass_get_softc(devclass, adapter);
2319			if (sc == NULL)
2320				return (ENOENT);
2321			return (mfi_linux_ioctl_int(sc->mfi_cdev,
2322			    cmd, arg, flag, td));
2323			break;
2324		}
2325	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2326		{
2327			devclass_t devclass;
2328			struct mfi_linux_ioc_aen l_aen;
2329			int adapter;
2330
2331			devclass = devclass_find("mfi");
2332			if (devclass == NULL)
2333				return (ENOENT);
2334
2335			error = copyin(arg, &l_aen, sizeof(l_aen));
2336			if (error)
2337				return (error);
2338			adapter = l_aen.laen_adapter_no;
2339			sc = devclass_get_softc(devclass, adapter);
2340			if (sc == NULL)
2341				return (ENOENT);
2342			return (mfi_linux_ioctl_int(sc->mfi_cdev,
2343			    cmd, arg, flag, td));
2344			break;
2345		}
2346#ifdef __amd64__
2347	case MFIIO_PASSTHRU32:
2348		iop_swab.ioc_frame	= iop32->ioc_frame;
2349		iop_swab.buf_size	= iop32->buf_size;
2350		iop_swab.buf		= PTRIN(iop32->buf);
2351		iop			= &iop_swab;
2352		/* FALLTHROUGH */
2353#endif
2354	case MFIIO_PASSTHRU:
2355		error = mfi_user_command(sc, iop);
2356#ifdef __amd64__
2357		if (cmd == MFIIO_PASSTHRU32)
2358			iop32->ioc_frame = iop_swab.ioc_frame;
2359#endif
2360		break;
2361	default:
2362		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2363		error = ENOENT;
2364		break;
2365	}
2366
2367	return (error);
2368}
2369
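/*
 * Back end for the Linux ioctl shim.  The flow mirrors the native MFI_CMD
 * case above, except that the request structures carry 32-bit pointers that
 * are converted with PTRIN() before copyin/copyout.
 */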
2370static int
2371mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
2372{
2373	struct mfi_softc *sc;
2374	struct mfi_linux_ioc_packet l_ioc;
2375	struct mfi_linux_ioc_aen l_aen;
2376	struct mfi_command *cm = NULL;
2377	struct mfi_aen *mfi_aen_entry;
2378	uint8_t *sense_ptr;
2379	uint32_t context;
2380	uint8_t *data = NULL, *temp;
2381	int i;
2382	int error, locked;
2383
2384	sc = dev->si_drv1;
2385	error = 0;
2386	switch (cmd) {
2387	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
2388		error = copyin(arg, &l_ioc, sizeof(l_ioc));
2389		if (error != 0)
2390			return (error);
2391
2392		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
2393			return (EINVAL);
2394		}
2395
2396		mtx_lock(&sc->mfi_io_lock);
2397		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2398			mtx_unlock(&sc->mfi_io_lock);
2399			return (EBUSY);
2400		}
2401		mtx_unlock(&sc->mfi_io_lock);
2402		locked = 0;
2403
2404		/*
2405		 * save off original context since copying from user
2406		 * will clobber some data
2407		 */
2408		context = cm->cm_frame->header.context;
2409
2410		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
2411		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
2412		cm->cm_total_frame_size = (sizeof(union mfi_sgl) * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
2413		if (l_ioc.lioc_sge_count)
2414			cm->cm_sg =
2415			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
2416		cm->cm_flags = 0;
2417		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
2418			cm->cm_flags |= MFI_CMD_DATAIN;
2419		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
2420			cm->cm_flags |= MFI_CMD_DATAOUT;
2421		cm->cm_len = cm->cm_frame->header.data_len;
2422		if (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT)) {
2423			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
2424			    M_WAITOK | M_ZERO);
2425			if (cm->cm_data == NULL) {
2426				device_printf(sc->mfi_dev, "Malloc failed\n");
2427				goto out;
2428			}
2429		} else {
2430			cm->cm_data = 0;
2431		}
2432
2433		/* restore header context */
2434		cm->cm_frame->header.context = context;
2435
2436		temp = data;
2437		if (cm->cm_flags & MFI_CMD_DATAOUT) {
2438			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2439				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
2440				       temp,
2441				       l_ioc.lioc_sgl[i].iov_len);
2442				if (error != 0) {
2443					device_printf(sc->mfi_dev,
2444					    "Copy in failed\n");
2445					goto out;
2446				}
2447				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2448			}
2449		}
2450
2451		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
2452			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
2453
2454		mtx_lock(&sc->mfi_io_lock);
2455		error = mfi_check_command_pre(sc, cm);
2456		if (error) {
2457			mtx_unlock(&sc->mfi_io_lock);
2458			goto out;
2459		}
2460
2461		if ((error = mfi_wait_command(sc, cm)) != 0) {
2462			device_printf(sc->mfi_dev,
2463			    "Controller command failed\n");
2464			mtx_unlock(&sc->mfi_io_lock);
2465			goto out;
2466		}
2467
2468		mfi_check_command_post(sc, cm);
2469		mtx_unlock(&sc->mfi_io_lock);
2470
2471		temp = data;
2472		if (cm->cm_flags & MFI_CMD_DATAIN) {
2473			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
2474				error = copyout(temp,
2475					PTRIN(l_ioc.lioc_sgl[i].iov_base),
2476					l_ioc.lioc_sgl[i].iov_len);
2477				if (error != 0) {
2478					device_printf(sc->mfi_dev,
2479					    "Copy out failed\n");
2480					goto out;
2481				}
2482				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
2483			}
2484		}
2485
2486		if (l_ioc.lioc_sense_len) {
2487			/* copy out sense */
2488			sense_ptr = &((struct mfi_linux_ioc_packet*)arg)
2489			    ->lioc_frame.raw[0];
2490			error = copyout(cm->cm_sense, sense_ptr,
2491			    l_ioc.lioc_sense_len);
2492			if (error != 0) {
2493				device_printf(sc->mfi_dev,
2494				    "Copy out failed\n");
2495				goto out;
2496			}
2497		}
2498
2499		error = copyout(&cm->cm_frame->header.cmd_status,
2500			&((struct mfi_linux_ioc_packet*)arg)
2501			->lioc_frame.hdr.cmd_status,
2502			1);
2503		if (error != 0) {
2504			device_printf(sc->mfi_dev,
2505				      "Copy out failed\n");
2506			goto out;
2507		}
2508
2509out:
2510		mfi_config_unlock(sc, locked);
2511		if (data)
2512			free(data, M_MFIBUF);
2513		if (cm) {
2514			mtx_lock(&sc->mfi_io_lock);
2515			mfi_release_command(cm);
2516			mtx_unlock(&sc->mfi_io_lock);
2517		}
2518
2519		return (error);
2520	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
2521		error = copyin(arg, &l_aen, sizeof(l_aen));
2522		if (error != 0)
2523			return (error);
2524		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
2525		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
2526		    M_WAITOK);
2527		mtx_lock(&sc->mfi_io_lock);
2528		if (mfi_aen_entry != NULL) {
2529			mfi_aen_entry->p = curproc;
2530			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
2531			    aen_link);
2532		}
2533		error = mfi_aen_register(sc, l_aen.laen_seq_num,
2534		    l_aen.laen_class_locale);
2535
2536		if (error != 0) {
2537			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2538			    aen_link);
2539			free(mfi_aen_entry, M_MFIBUF);
2540		}
2541		mtx_unlock(&sc->mfi_io_lock);
2542
2543		return (error);
2544	default:
2545		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
2546		error = ENOENT;
2547		break;
2548	}
2549
2550	return (error);
2551}
2552
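/*
 * poll(2) support for the control device: report it readable once an
 * asynchronous event (AEN) has fired, or POLLERR if no AEN command is
 * outstanding to deliver one.
 */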
2553static int
2554mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
2555{
2556	struct mfi_softc *sc;
2557	int revents = 0;
2558
2559	sc = dev->si_drv1;
2560
2561	if (poll_events & (POLLIN | POLLRDNORM)) {
2562		if (sc->mfi_aen_triggered != 0) {
2563			revents |= poll_events & (POLLIN | POLLRDNORM);
2564			sc->mfi_aen_triggered = 0;
2565		}
2566		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2567			revents |= POLLERR;
2568		}
2569	}
2570
2571	if (revents == 0) {
2572		if (poll_events & (POLLIN | POLLRDNORM)) {
2573			sc->mfi_poll_waiting = 1;
2574			selrecord(td, &sc->mfi_select);
2575		}
2576	}
2577
2578	return revents;
2579}
2580
2581
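/*
 * Debugging aid: walk every mfi(4) instance and report any command that has
 * been on the busy queue for longer than MFI_CMD_TIMEOUT seconds.  Currently
 * only reachable from the disabled branch at the end of mfi_timeout().
 */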
2582static void
2583mfi_dump_all(void)
2584{
2585	struct mfi_softc *sc;
2586	struct mfi_command *cm;
2587	devclass_t dc;
2588	time_t deadline;
2589	int timedout;
2590	int i;
2591
2592	dc = devclass_find("mfi");
2593	if (dc == NULL) {
2594		printf("No mfi dev class\n");
2595		return;
2596	}
2597
2598	for (i = 0; ; i++) {
2599		sc = devclass_get_softc(dc, i);
2600		if (sc == NULL)
2601			break;
2602		device_printf(sc->mfi_dev, "Dumping\n\n");
2603		timedout = 0;
2604		deadline = time_uptime - MFI_CMD_TIMEOUT;
2605		mtx_lock(&sc->mfi_io_lock);
2606		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2607			if (cm->cm_timestamp < deadline) {
2608				device_printf(sc->mfi_dev,
2609				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2610				    (int)(time_uptime - cm->cm_timestamp));
2611				MFI_PRINT_CMD(cm);
2612				timedout++;
2613			}
2614		}
2615
2616#if 0
2617		if (timedout)
2618			MFI_DUMP_CMDS(SC);
2619#endif
2620
2621		mtx_unlock(&sc->mfi_io_lock);
2622	}
2623
2624	return;
2625}
2626
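/*
 * Watchdog callout: complain about commands (other than the long-lived AEN
 * command) that have been busy for more than MFI_CMD_TIMEOUT seconds, then
 * reschedule itself.  Nothing is aborted here; this only reports.
 */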
2627static void
2628mfi_timeout(void *data)
2629{
2630	struct mfi_softc *sc = (struct mfi_softc *)data;
2631	struct mfi_command *cm;
2632	time_t deadline;
2633	int timedout = 0;
2634
2635	deadline = time_uptime - MFI_CMD_TIMEOUT;
2636	mtx_lock(&sc->mfi_io_lock);
2637	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2638		if (sc->mfi_aen_cm == cm)
2639			continue;
2640		if (cm->cm_timestamp < deadline) {
2641			device_printf(sc->mfi_dev,
2642			    "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm,
2643			    (int)(time_uptime - cm->cm_timestamp));
2644			MFI_PRINT_CMD(cm);
2645			MFI_VALIDATE_CMD(sc, cm);
2646			timedout++;
2647		}
2648	}
2649
2650#if 0
2651	if (timedout)
2652		MFI_DUMP_CMDS(SC);
2653#endif
2654
2655	mtx_unlock(&sc->mfi_io_lock);
2656
2657	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
2658	    mfi_timeout, sc);
2659
2660	if (0)
2661		mfi_dump_all();
2662	return;
2663}
2664