/*-
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 233711 2012-03-30 23:05:48Z ambrisko $");

#include "opt_compat.h"
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bus_dma.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
		    uint32_t, void **, size_t);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
	   0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
	   0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
	   0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return 1;

	MFI_WRITE4(sc, MFI_OSTS, status);
	return 0;
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM)) {
			return 1;
		}
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM)) {
			return 1;
		}
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}

static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
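	/*
	 * Frame addresses are at least 64-byte aligned, so the address is
	 * handed to the inbound queue port shifted right by 3 with the
	 * extra frame count packed into the freed-up low bits.
	 */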
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
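	/*
	 * Frames are 64-byte aligned, so the low bits of the address are
	 * reused to carry the extra frame count (shifted left one); bit 0
	 * is always set.
	 */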
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
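		/*
		 * Poll the status register ten times per second for up to
		 * max_wait seconds, waiting for the firmware to leave the
		 * current state.
		 */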
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val) {
				continue;
			}
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge;
	uint32_t tb_mem_size = 0;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
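	/*
	 * A transfer of MFI_MAXPHYS bytes touches at most
	 * MFI_MAXPHYS / PAGE_SIZE + 1 pages, so there is no point in
	 * asking the firmware for more S/G entries than that.
	 */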
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));

	/* ThunderBolt support: get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x\n",
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from the request and
		 * reply descriptor allocations above to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
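	/*
	 * One frame for the header plus enough whole frames to hold the
	 * largest possible S/G list: the "- 1 ... + 2" arithmetic is
	 * ceil(sgl_bytes / MFI_FRAME_SIZE) plus one header frame.
	 */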
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency.
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to the operational state, check whether
	 * host memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			return (error);
		}

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
		sc->map_id = 0;
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (sc->mfi_cdev != NULL) {
		sc->mfi_cdev->si_drv1 = sc;
		if (unit == 0)
			make_dev_alias(sc->mfi_cdev,
			    "megaraid_sas_ioctl_node");
	}
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
	    mfi_timeout, sc);

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, ncmds;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
	if (bootverbose)
		device_printf(sc->mfi_dev, "Max fw cmds = %d, sizing driver "
		    "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);

	sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < ncmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) != 0)
			break;
		mtx_lock(&sc->mfi_io_lock);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		sc->mfi_total_cmds++;
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

static int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

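	/*
	 * If the caller wants a data buffer but did not supply one,
	 * allocate it here and hand it back through *bufp.
	 */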
	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

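	/*
	 * Describe the reply queue ring and the producer/consumer index
	 * locations to the firmware.  The ring has one more slot than
	 * the maximum number of outstanding commands.
	 */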
	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (0);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
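		/* Fall back to a transfer size derived from the S/G limit. */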
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;
	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				free(log_state, M_MFIBUF);
			return (error);
		}
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num);
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	mfi_aen_register(sc, seq, class_locale.word);
	if (log_state != NULL)
		free(log_state, M_MFIBUF);

	return 0;
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
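	/*
	 * Drain completed commands: each reply queue entry between the
	 * consumer index (ci) and the producer index (pi) holds the
	 * context of a finished command.  The queue has
	 * mfi_max_fw_cmds + 1 slots, so the index wraps there.
	 */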
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->map_update_cmd != NULL)
		mfi_abort(sc, sc->map_update_cmd);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			printf("DELETE\n");
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is instead the number of seconds since
 * boot.
 */
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AENs or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's.
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver replays all events logged since
		 * the last shutdown; take care not to act on these stale
		 * events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it.
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
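	/*
	 * If an event wait is already outstanding and it already covers
	 * the requested class and locale, there is nothing to do.
	 * Otherwise merge the two requests, abort the old command, and
	 * re-register below with the combined class/locale.
	 */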
	if (sc->mfi_aen_cm != NULL) {
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
			return (0);
		} else {
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			mtx_lock(&sc->mfi_io_lock);
			mfi_abort(sc, sc->mfi_aen_cm);
			mtx_unlock(&sc->mfi_io_lock);
		}
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	mtx_unlock(&sc->mfi_io_lock);
	if (error) {
		goto out;
	}

	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	mtx_lock(&sc->mfi_io_lock);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

out:
	return (error);
}

static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->mfi_aen_cm->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			selwakeup(&sc->mfi_select);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
		    tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			PROC_LOCK(mfi_aen_entry->p);
			kern_psignal(mfi_aen_entry->p, SIGIO);
			PROC_UNLOCK(mfi_aen_entry->p);
			free(mfi_aen_entry, M_MFIBUF);
		}
	}

	free(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* Set it up again so the driver can catch more events */
	if (!aborted) {
		mtx_unlock(&sc->mfi_io_lock);
		mfi_aen_setup(sc, seq);
		mtx_lock(&sc->mfi_io_lock);
	}
}

#define MAX_EVENTS 15

static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

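	/*
	 * Fetch events in batches of up to MAX_EVENTS, advancing the
	 * sequence number past the last entry returned, until the
	 * firmware reports that no more events are available.
	 */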
1725	for (seq = start_seq;;) {
1726		mtx_lock(&sc->mfi_io_lock);
1727		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1728			free(el, M_MFIBUF);
1729			mtx_unlock(&sc->mfi_io_lock);
1730			return (EBUSY);
1731		}
1732		mtx_unlock(&sc->mfi_io_lock);
1733
1734		dcmd = &cm->cm_frame->dcmd;
1735		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1736		dcmd->header.cmd = MFI_CMD_DCMD;
1737		dcmd->header.timeout = 0;
1738		dcmd->header.data_len = size;
1739		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1740		((uint32_t *)&dcmd->mbox)[0] = seq;
1741		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1742		cm->cm_sg = &dcmd->sgl;
1743		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1744		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1745		cm->cm_data = el;
1746		cm->cm_len = size;
1747
1748		mtx_lock(&sc->mfi_io_lock);
1749		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1750			device_printf(sc->mfi_dev,
1751			    "Failed to get controller entries\n");
1752			mfi_release_command(cm);
1753			mtx_unlock(&sc->mfi_io_lock);
1754			break;
1755		}
1756
1757		mtx_unlock(&sc->mfi_io_lock);
1758		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1759		    BUS_DMASYNC_POSTREAD);
1760		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1761
1762		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1763			mtx_lock(&sc->mfi_io_lock);
1764			mfi_release_command(cm);
1765			mtx_unlock(&sc->mfi_io_lock);
1766			break;
1767		}
1768		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1769			device_printf(sc->mfi_dev,
1770			    "Error %d fetching controller entries\n",
1771			    dcmd->header.cmd_status);
1772			mtx_lock(&sc->mfi_io_lock);
1773			mfi_release_command(cm);
1774			mtx_unlock(&sc->mfi_io_lock);
1775			break;
1776		}
1777		mtx_lock(&sc->mfi_io_lock);
1778		mfi_release_command(cm);
1779		mtx_unlock(&sc->mfi_io_lock);
1780
1781		for (i = 0; i < el->count; i++) {
1782			/*
1783			 * If this event is newer than 'stop_seq' then
1784			 * break out of the loop.  Note that the log
1785			 * is a circular buffer so we have to handle
1786			 * the case that our stop point is earlier in
1787			 * the buffer than our start point.
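			 * For example, if start_seq is 100 and stop_seq
			 * is 50 (the log has wrapped), events are kept
			 * from 100 up through the wrap and the scan stops
			 * once a sequence number in [50, 100) is seen.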
1788			 */
1789			if (el->event[i].seq >= stop_seq) {
1790				if (start_seq <= stop_seq)
1791					break;
1792				else if (el->event[i].seq < start_seq)
1793					break;
1794			}
1795			mtx_lock(&sc->mfi_io_lock);
1796			mfi_queue_evt(sc, &el->event[i]);
1797			mtx_unlock(&sc->mfi_io_lock);
1798		}
1799		seq = el->event[el->count - 1].seq + 1;
1800	}
1801
1802	free(el, M_MFIBUF);
1803	return (0);
1804}
1805
1806static int
1807mfi_add_ld(struct mfi_softc *sc, int id)
1808{
1809	struct mfi_command *cm;
1810	struct mfi_dcmd_frame *dcmd = NULL;
1811	struct mfi_ld_info *ld_info = NULL;
1812	int error;
1813
1814	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1815
1816	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1817	    (void **)&ld_info, sizeof(*ld_info));
1818	if (error) {
1819		device_printf(sc->mfi_dev,
1820		    "Failed to allocate command for MFI_DCMD_LD_GET_INFO: %d\n", error);
1821		if (ld_info)
1822			free(ld_info, M_MFIBUF);
1823		return (error);
1824	}
1825	cm->cm_flags = MFI_CMD_DATAIN;
1826	dcmd = &cm->cm_frame->dcmd;
1827	dcmd->mbox[0] = id;
1828	if (mfi_wait_command(sc, cm) != 0) {
1829		device_printf(sc->mfi_dev,
1830		    "Failed to get logical drive: %d\n", id);
1831		free(ld_info, M_MFIBUF);
1832		return (0);
1833	}
1834	if (ld_info->ld_config.params.isSSCD != 1)
1835		mfi_add_ld_complete(cm);
1836	else {
1837		mfi_release_command(cm);
1838		if (ld_info)		/* for SSCD drives, ld_info is freed here */
1839			free(ld_info, M_MFIBUF);
1840	}
1841	return (0);
1842}
1843
1844static void
1845mfi_add_ld_complete(struct mfi_command *cm)
1846{
1847	struct mfi_frame_header *hdr;
1848	struct mfi_ld_info *ld_info;
1849	struct mfi_softc *sc;
1850	device_t child;
1851
1852	sc = cm->cm_sc;
1853	hdr = &cm->cm_frame->header;
1854	ld_info = cm->cm_private;
1855
1856	if (hdr->cmd_status != MFI_STAT_OK) {
1857		free(ld_info, M_MFIBUF);
1858		mfi_release_command(cm);
1859		return;
1860	}
1861	mfi_release_command(cm);
1862
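	/*
	 * The newbus device tree is protected by Giant, so drop the
	 * per-softc lock and take Giant around the child device calls.
	 */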
1863	mtx_unlock(&sc->mfi_io_lock);
1864	mtx_lock(&Giant);
1865	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1866		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1867		free(ld_info, M_MFIBUF);
1868		mtx_unlock(&Giant);
1869		mtx_lock(&sc->mfi_io_lock);
1870		return;
1871	}
1872
1873	device_set_ivars(child, ld_info);
1874	device_set_desc(child, "MFI Logical Disk");
1875	bus_generic_attach(sc->mfi_dev);
1876	mtx_unlock(&Giant);
1877	mtx_lock(&sc->mfi_io_lock);
1878}
1879
1880	static int
	mfi_add_sys_pd(struct mfi_softc *sc, int id)
1881{
1882	struct mfi_command *cm;
1883	struct mfi_dcmd_frame *dcmd = NULL;
1884	struct mfi_pd_info *pd_info = NULL;
1885	int error;
1886
1887	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1888
1889	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1890		(void **)&pd_info, sizeof(*pd_info));
1891	if (error) {
1892		device_printf(sc->mfi_dev,
1893		    "Failed to allocate command for MFI_DCMD_PD_GET_INFO: %d\n",
1894		    error);
1895		if (pd_info)
1896			free(pd_info, M_MFIBUF);
1897		return (error);
1898	}
1899	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1900	dcmd = &cm->cm_frame->dcmd;
1901		dcmd->mbox[0] = id;
1902	dcmd->header.scsi_status = 0;
1903	dcmd->header.pad0 = 0;
1904	if (mfi_mapcmd(sc, cm) != 0) {
1905		device_printf(sc->mfi_dev,
1906		    "Failed to get physical drive info %d\n", id);
1907		free(pd_info, M_MFIBUF);
1908		return (0);
1909	}
1910	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1911	    BUS_DMASYNC_POSTREAD);
1912	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1913	mfi_add_sys_pd_complete(cm);
1914	return (0);
1915}
1916
1917static void
1918mfi_add_sys_pd_complete(struct mfi_command *cm)
1919{
1920	struct mfi_frame_header *hdr;
1921	struct mfi_pd_info *pd_info;
1922	struct mfi_softc *sc;
1923	device_t child;
1924
1925	sc = cm->cm_sc;
1926	hdr = &cm->cm_frame->header;
1927	pd_info = cm->cm_private;
1928
1929	if (hdr->cmd_status != MFI_STAT_OK) {
1930		free(pd_info, M_MFIBUF);
1931		mfi_release_command(cm);
1932		return;
1933	}
1934	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1935		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1936		    pd_info->ref.v.device_id);
1937		free(pd_info, M_MFIBUF);
1938		mfi_release_command(cm);
1939		return;
1940	}
1941	mfi_release_command(cm);
1942
1943	mtx_unlock(&sc->mfi_io_lock);
1944	mtx_lock(&Giant);
1945	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1946		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1947		free(pd_info, M_MFIBUF);
1948		mtx_unlock(&Giant);
1949		mtx_lock(&sc->mfi_io_lock);
1950		return;
1951	}
1952
1953	device_set_ivars(child, pd_info);
1954	device_set_desc(child, "MFI System PD");
1955	bus_generic_attach(sc->mfi_dev);
1956	mtx_unlock(&Giant);
1957	mtx_lock(&sc->mfi_io_lock);
1958	}

1959	static struct mfi_command *
1960mfi_bio_command(struct mfi_softc *sc)
1961{
1962	struct bio *bio;
1963	struct mfi_command *cm = NULL;
1964
1965		/* Reserve two commands to avoid starving ioctl requests */
1966		if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
1967		return (NULL);
1968	}
1969	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1970		return (NULL);
1971	}
1972	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
1973		cm = mfi_build_ldio(sc, bio);
1974	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
1975		cm = mfi_build_syspdio(sc, bio);
1976	}
1977		if (cm == NULL)
1978			mfi_enqueue_bio(sc, bio);
1979		return (cm);
1980	}

1981	static struct mfi_command *
1982mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1983{
1984	struct mfi_command *cm;
1985	struct mfi_pass_frame *pass;
1986	int flags = 0, blkcount = 0;
1987	uint32_t context = 0;
1988
1989	if ((cm = mfi_dequeue_free(sc)) == NULL)
1990			return (NULL);
1991
1992	/* Zero out the MFI frame */
1993		context = cm->cm_frame->header.context;
1994	bzero(cm->cm_frame, sizeof(union mfi_frame));
1995	cm->cm_frame->header.context = context;
1996	pass = &cm->cm_frame->pass;
1997	bzero(pass->cdb, 16);
1998	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1999	switch (bio->bio_cmd & 0x03) {
2000	case BIO_READ:
2001#define SCSI_READ 0x28
2002		pass->cdb[0] = SCSI_READ;
2003		flags = MFI_CMD_DATAIN;
2004		break;
2005	case BIO_WRITE:
2006#define SCSI_WRITE 0x2a
2007		pass->cdb[0] = SCSI_WRITE;
2008		flags = MFI_CMD_DATAOUT;
2009		break;
2010	default:
2011		panic("Invalid bio command");
2012	}
2013
2014	/* Cheat with the sector length to avoid a non-constant division */
2015	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2016	/* Fill the LBA and Transfer length in CDB */
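	/*
	 * READ(10)/WRITE(10): bytes 2-5 of the CDB hold the big-endian
	 * 32-bit LBA and bytes 7-8 the big-endian 16-bit block count,
	 * so only the low 32 bits of bio_pblkno can be addressed here.
	 */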
2017	pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
2018	pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
2019	pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
2020	pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
2021	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2022	pass->cdb[8] = (blkcount & 0x00ff);
2023	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2024	pass->header.timeout = 0;
2025	pass->header.flags = 0;
2026	pass->header.scsi_status = 0;
2027	pass->header.sense_len = MFI_SENSE_LEN;
2028	pass->header.data_len = bio->bio_bcount;
2029	pass->header.cdb_len = 10;
2030	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2031	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2032	cm->cm_complete = mfi_bio_complete;
2033	cm->cm_private = bio;
2034	cm->cm_data = bio->bio_data;
2035	cm->cm_len = bio->bio_bcount;
2036	cm->cm_sg = &pass->sgl;
2037	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2038	cm->cm_flags = flags;
2039	return (cm);
2040}
2041
2042static struct mfi_command *
2043mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2044{
2045	struct mfi_io_frame *io;
2046	struct mfi_command *cm;
2047	int flags, blkcount;
2048	uint32_t context = 0;
2049
2050	if ((cm = mfi_dequeue_free(sc)) == NULL)
2051			return (NULL);
2052
2053	/* Zero out the MFI frame */
2054	context = cm->cm_frame->header.context;
2055	bzero(cm->cm_frame, sizeof(union mfi_frame));
2056	cm->cm_frame->header.context = context;
2057	io = &cm->cm_frame->io;
2058	switch (bio->bio_cmd & 0x03) {
2059	case BIO_READ:
2060		io->header.cmd = MFI_CMD_LD_READ;
2061		flags = MFI_CMD_DATAIN;
2062		break;
2063	case BIO_WRITE:
2064		io->header.cmd = MFI_CMD_LD_WRITE;
2065		flags = MFI_CMD_DATAOUT;
2066		break;
2067	default:
2068		panic("Invalid bio command");
2069	}
2070
2071	/* Cheat with the sector length to avoid a non-constant division */
2072	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2073	io->header.target_id = (uintptr_t)bio->bio_driver1;
2074	io->header.timeout = 0;
2075	io->header.flags = 0;
2076	io->header.scsi_status = 0;
2077	io->header.sense_len = MFI_SENSE_LEN;
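	/*
	 * Unlike pass-through frames, LD I/O frames carry data_len
	 * in blocks rather than bytes.
	 */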
2078	io->header.data_len = blkcount;
2079	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2080	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2081	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2082	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2083	cm->cm_complete = mfi_bio_complete;
2084	cm->cm_private = bio;
2085	cm->cm_data = bio->bio_data;
2086	cm->cm_len = bio->bio_bcount;
2087	cm->cm_sg = &io->sgl;
2088	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2089	cm->cm_flags = flags;
2090	return (cm);
2091}
2092
2093static void
2094mfi_bio_complete(struct mfi_command *cm)
2095{
2096	struct bio *bio;
2097	struct mfi_frame_header *hdr;
2098	struct mfi_softc *sc;
2099
2100	bio = cm->cm_private;
2101	hdr = &cm->cm_frame->header;
2102	sc = cm->cm_sc;
2103
2104	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2105		bio->bio_flags |= BIO_ERROR;
2106		bio->bio_error = EIO;
2107		device_printf(sc->mfi_dev, "I/O error, cmd_status=%d "
2108		    "scsi_status=%d\n", hdr->cmd_status, hdr->scsi_status);
2109		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2110	} else if (cm->cm_error != 0) {
2111		bio->bio_flags |= BIO_ERROR;
2112	}
2113
2114	mfi_release_command(cm);
2115	mfi_disk_complete(bio);
2116}
2117
2118void
2119mfi_startio(struct mfi_softc *sc)
2120{
2121	struct mfi_command *cm;
2122	struct ccb_hdr *ccbh;
2123
2124	for (;;) {
2125		/* Don't bother if we're short on resources */
2126		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2127			break;
2128
2129		/* Try a command that has already been prepared */
2130		cm = mfi_dequeue_ready(sc);
2131
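		/* Next, check whether CAM has queued any pass-through work */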
2132		if (cm == NULL) {
2133			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2134				cm = sc->mfi_cam_start(ccbh);
2135		}
2136
2137		/* Nope, so look for work on the bioq */
2138		if (cm == NULL)
2139			cm = mfi_bio_command(sc);
2140
2141		/* No work available, so exit */
2142		if (cm == NULL)
2143			break;
2144
2145		/* Send the command to the controller */
2146		if (mfi_mapcmd(sc, cm) != 0) {
2147			mfi_requeue_ready(cm);
2148			break;
2149		}
2150	}
2151}
2152
2153int
2154mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2155{
2156	int error, polled;
2157
2158	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2159
2160		if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2161		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2162		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2163		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
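		/*
		 * For non-polled commands bus_dmamap_load() may defer the
		 * callback when mappings are scarce; freeze the queue and
		 * let mfi_data_cb() issue the frame once the load completes.
		 */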
2164		if (error == EINPROGRESS) {
2165			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2166			return (0);
2167		}
2168	} else {
2169		if (sc->MFA_enabled)
2170			error = mfi_tbolt_send_frame(sc, cm);
2171		else
2172			error = mfi_send_frame(sc, cm);
2173	}
2174
2175	return (error);
2176}
2177
2178static void
2179mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2180{
2181	struct mfi_frame_header *hdr;
2182	struct mfi_command *cm;
2183	union mfi_sgl *sgl;
2184	struct mfi_softc *sc;
2185	int i, j, first, dir;
2186	int sge_size;
2187
2188	cm = (struct mfi_command *)arg;
2189	sc = cm->cm_sc;
2190	hdr = &cm->cm_frame->header;
2191	sgl = cm->cm_sg;
2192
2193	if (error) {
2194			device_printf(sc->mfi_dev, "error %d in dma callback\n", error);
2195		cm->cm_error = error;
2196		mfi_complete(sc, cm);
2197		return;
2198	}
2199	/* Use IEEE SGL only for I/Os on a SKINNY controller.
2200	 * For other commands on a SKINNY controller use either
2201	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2202	 * Also calculate the total frame size based on the type
2203	 * of SGL used.
2204	 */
2205	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2206	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2207	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2208	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2209		for (i = 0; i < nsegs; i++) {
2210			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2211			sgl->sg_skinny[i].len = segs[i].ds_len;
2212			sgl->sg_skinny[i].flag = 0;
2213		}
2214		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2215		sge_size = sizeof(struct mfi_sg_skinny);
2216		hdr->sg_count = nsegs;
2217	} else {
2218		j = 0;
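		/*
		 * For MFI_CMD_STP the first cm_stp_len bytes get their own
		 * SG entry; the loop below then offsets segment 0 past
		 * that prefix.
		 */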
2219		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2220			first = cm->cm_stp_len;
2221			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2222				sgl->sg32[j].addr = segs[0].ds_addr;
2223				sgl->sg32[j++].len = first;
2224			} else {
2225				sgl->sg64[j].addr = segs[0].ds_addr;
2226				sgl->sg64[j++].len = first;
2227			}
2228		} else
2229			first = 0;
2230		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2231			for (i = 0; i < nsegs; i++) {
2232				sgl->sg32[j].addr = segs[i].ds_addr + first;
2233				sgl->sg32[j++].len = segs[i].ds_len - first;
2234				first = 0;
2235			}
2236		} else {
2237			for (i = 0; i < nsegs; i++) {
2238				sgl->sg64[j].addr = segs[i].ds_addr + first;
2239				sgl->sg64[j++].len = segs[i].ds_len - first;
2240				first = 0;
2241			}
2242			hdr->flags |= MFI_FRAME_SGL64;
2243		}
2244		hdr->sg_count = j;
2245		sge_size = sc->mfi_sge_size;
2246	}
2247
2248	dir = 0;
2249	if (cm->cm_flags & MFI_CMD_DATAIN) {
2250		dir |= BUS_DMASYNC_PREREAD;
2251		hdr->flags |= MFI_FRAME_DIR_READ;
2252	}
2253	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2254		dir |= BUS_DMASYNC_PREWRITE;
2255		hdr->flags |= MFI_FRAME_DIR_WRITE;
2256	}
2257	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2258	cm->cm_flags |= MFI_CMD_MAPPED;
2259
2260	/*
2261	 * Instead of calculating the total number of frames in the
2262	 * compound frame, it's already assumed that there will be at
2263	 * least 1 frame, so don't compensate for the modulo of the
2264	 * following division.
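	 * For example, 192 total bytes with 64-byte frames yields
	 * (192 - 1) / 64 = 2 extra frames.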
2265	 */
2266	cm->cm_total_frame_size += (sge_size * nsegs);
2267	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2268
2269	if (sc->MFA_enabled)
2270			mfi_tbolt_send_frame(sc, cm);
2271	else
2272		mfi_send_frame(sc, cm);
2273
2274	return;
2275}
2276
2277static int
2278mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2279{
2280	struct mfi_frame_header *hdr;
2281	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2282
2283	hdr = &cm->cm_frame->header;
2284
2285	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2286		cm->cm_timestamp = time_uptime;
2287		mfi_enqueue_busy(cm);
2288	} else {
2289		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2290		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2291	}
2292
2293	/*
2294	 * The bus address of the command is aligned on a 64 byte boundary,
2295	 * leaving the least 6 bits as zero.  For whatever reason, the
2296	 * hardware wants the address shifted right by three, leaving just
2297	 * 3 zero bits.  These three bits are then used as a prefetching
2298	 * hint for the hardware to predict how many frames need to be
2299	 * fetched across the bus.  If a command has more than 8 frames
2300	 * then the 3 bits are set to 0x7 and the firmware uses other
2301	 * information in the command to determine the total amount to fetch.
2302	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2303	 * is enough for both 32bit and 64bit systems.
2304	 */
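	/*
	 * For instance, a command spanning three frames whose first
	 * frame sits at bus address 0x12340 is issued as (0x12340 >> 3)
	 * with a hint of 2 extra frames.
	 */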
2305	if (cm->cm_extra_frames > 7)
2306		cm->cm_extra_frames = 7;
2307
2308	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2309
2310	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2311		return (0);
2312
2313	/* This is a polled command, so busy-wait for it to complete. */
2314	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2315		DELAY(1000);
2316		tm -= 1;
2317		if (tm <= 0)
2318			break;
2319	}
2320
2321	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2322		device_printf(sc->mfi_dev, "Frame %p timed out "
2323		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2324		return (ETIMEDOUT);
2325	}
2326
2327	return (0);
2328}
2329
2330
2331void
2332mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2333{
2334	int dir;
2335
2336	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2337		dir = 0;
2338		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2339		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2340			dir |= BUS_DMASYNC_POSTREAD;
2341		if (cm->cm_flags & MFI_CMD_DATAOUT)
2342			dir |= BUS_DMASYNC_POSTWRITE;
2343
2344		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2345		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2346		cm->cm_flags &= ~MFI_CMD_MAPPED;
2347	}
2348
2349	cm->cm_flags |= MFI_CMD_COMPLETED;
2350
2351	if (cm->cm_complete != NULL)
2352		cm->cm_complete(cm);
2353	else
2354		wakeup(cm);
2355}
2356
2357static int
2358mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2359{
2360	struct mfi_command *cm;
2361	struct mfi_abort_frame *abort;
2362	int i = 0;
2363	uint32_t context = 0;
2364
2365	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2366
2367	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2368		return (EBUSY);
2369	}
2370
2371	/* Zero out the MFI frame */
2372	context = cm->cm_frame->header.context;
2373	bzero(cm->cm_frame, sizeof(union mfi_frame));
2374	cm->cm_frame->header.context = context;
2375
2376	abort = &cm->cm_frame->abort;
2377	abort->header.cmd = MFI_CMD_ABORT;
2378	abort->header.flags = 0;
2379	abort->header.scsi_status = 0;
2380	abort->abort_context = cm_abort->cm_frame->header.context;
2381	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2382	abort->abort_mfi_addr_hi =
2383	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2384	cm->cm_data = NULL;
2385	cm->cm_flags = MFI_CMD_POLLED;
2386
2387	if (sc->mfi_aen_cm)
2388		sc->mfi_aen_cm->cm_aen_abort = 1;
2389	mfi_mapcmd(sc, cm);
2390	mfi_release_command(cm);
2391
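	/*
	 * Wait for mfi_aen_complete() to clear mfi_aen_cm and do the
	 * matching wakeup(); five 5-second sleeps bounds this at ~25s.
	 */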
2392	while (i < 5 && sc->mfi_aen_cm != NULL) {
2393		msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2394		    5 * hz);
2395		i++;
2396	}
2397
2398	return (0);
2399}
2400
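/*
 * Write a run of blocks to a logical drive with a polled command;
 * used from the crash dump path where interrupts are unavailable.
 */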
2401int
2402mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2403     int len)
2404{
2405	struct mfi_command *cm;
2406	struct mfi_io_frame *io;
2407	int error;
2408	uint32_t context = 0;
2409
2410	if ((cm = mfi_dequeue_free(sc)) == NULL)
2411		return (EBUSY);
2412
2413	/* Zero out the MFI frame */
2414	context = cm->cm_frame->header.context;
2415	bzero(cm->cm_frame, sizeof(union mfi_frame));
2416	cm->cm_frame->header.context = context;
2417
2418	io = &cm->cm_frame->io;
2419	io->header.cmd = MFI_CMD_LD_WRITE;
2420	io->header.target_id = id;
2421	io->header.timeout = 0;
2422	io->header.flags = 0;
2423	io->header.scsi_status = 0;
2424	io->header.sense_len = MFI_SENSE_LEN;
2425	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2426	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2427	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2428	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2429	io->lba_lo = lba & 0xffffffff;
2430	cm->cm_data = virt;
2431	cm->cm_len = len;
2432	cm->cm_sg = &io->sgl;
2433	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2434	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2435
2436	error = mfi_mapcmd(sc, cm);
2437	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2438	    BUS_DMASYNC_POSTWRITE);
2439	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2440	mfi_release_command(cm);
2441
2442	return (error);
2443}
2444
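/*
 * As above, but for a system physical drive via a SCSI WRITE(10)
 * pass-through; only the low 32 bits of the LBA fit in the CDB.
 */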
2445int
2446mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2447    int len)
2448{
2449	struct mfi_command *cm;
2450	struct mfi_pass_frame *pass;
2451	int error;
2452	int blkcount = 0;
2453
2454	if ((cm = mfi_dequeue_free(sc)) == NULL)
2455		return (EBUSY);
2456
2457	pass = &cm->cm_frame->pass;
2458	bzero(pass->cdb, 16);
2459	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2460	pass->cdb[0] = SCSI_WRITE;
2461	pass->cdb[2] = (lba & 0xff000000) >> 24;
2462	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2463	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2464	pass->cdb[5] = (lba & 0x000000ff);
2465	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2466	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2467	pass->cdb[8] = (blkcount & 0x00ff);
2468	pass->header.target_id = id;
2469	pass->header.timeout = 0;
2470	pass->header.flags = 0;
2471	pass->header.scsi_status = 0;
2472	pass->header.sense_len = MFI_SENSE_LEN;
2473	pass->header.data_len = len;
2474	pass->header.cdb_len = 10;
2475	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2476	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2477	cm->cm_data = virt;
2478	cm->cm_len = len;
2479	cm->cm_sg = &pass->sgl;
2480	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2481	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2482
2483	error = mfi_mapcmd(sc, cm);
2484	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2485	    BUS_DMASYNC_POSTWRITE);
2486	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2487	mfi_release_command(cm);
2488
2489	return (error);
2490}
2491
2492static int
2493mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2494{
2495	struct mfi_softc *sc;
2496	int error;
2497
2498	sc = dev->si_drv1;
2499
2500	mtx_lock(&sc->mfi_io_lock);
2501	if (sc->mfi_detaching)
2502		error = ENXIO;
2503	else {
2504		sc->mfi_flags |= MFI_FLAGS_OPEN;
2505		error = 0;
2506	}
2507	mtx_unlock(&sc->mfi_io_lock);
2508
2509	return (error);
2510}
2511
2512static int
2513mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2514{
2515	struct mfi_softc *sc;
2516	struct mfi_aen *mfi_aen_entry, *tmp;
2517
2518	sc = dev->si_drv1;
2519
2520	mtx_lock(&sc->mfi_io_lock);
2521	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2522
2523	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2524		if (mfi_aen_entry->p == curproc) {
2525			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2526			    aen_link);
2527			free(mfi_aen_entry, M_MFIBUF);
2528		}
2529	}
2530	mtx_unlock(&sc->mfi_io_lock);
2531	return (0);
2532}
2533
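/*
 * Configuration-changing DCMDs are serialized with an sx lock; the
 * return value says whether the lock was taken so the caller can
 * hand it to mfi_config_unlock().
 */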
2534static int
2535mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2536{
2537
2538	switch (opcode) {
2539	case MFI_DCMD_LD_DELETE:
2540	case MFI_DCMD_CFG_ADD:
2541	case MFI_DCMD_CFG_CLEAR:
2542		sx_xlock(&sc->mfi_config_lock);
2543		return (1);
2544	default:
2545		return (0);
2546	}
2547}
2548
2549static void
2550mfi_config_unlock(struct mfi_softc *sc, int locked)
2551{
2552
2553	if (locked)
2554		sx_xunlock(&sc->mfi_config_lock);
2555}
2556
2557/*
2558 * Perform pre-issue checks on commands from userland and possibly veto
2559 * them.
2560 */
2561static int
2562mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2563{
2564	struct mfi_disk *ld, *ld2;
2565	int error;
2566	struct mfi_system_pd *syspd = NULL;
2567	uint16_t syspd_id;
2568	uint16_t *mbox;
2569
2570	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2571	error = 0;
2572	switch (cm->cm_frame->dcmd.opcode) {
2573	case MFI_DCMD_LD_DELETE:
2574		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2575			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2576				break;
2577		}
2578		if (ld == NULL)
2579			error = ENOENT;
2580		else
2581			error = mfi_disk_disable(ld);
2582		break;
2583	case MFI_DCMD_CFG_CLEAR:
2584		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2585			error = mfi_disk_disable(ld);
2586			if (error)
2587				break;
2588		}
2589		if (error) {
2590			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2591				if (ld2 == ld)
2592					break;
2593				mfi_disk_enable(ld2);
2594			}
2595		}
2596		break;
2597	case MFI_DCMD_PD_STATE_SET:
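		/*
		 * Viewed as 16-bit words, mbox[0] holds the PD device id
		 * and mbox[2] the requested state.
		 */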
2598		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2599		syspd_id = mbox[0];
2600		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2601			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2602				if (syspd->pd_id == syspd_id)
2603					break;
2604			}
2605		}
2606		else
2607			break;
2608		if (syspd)
2609			error = mfi_syspd_disable(syspd);
2610		break;
2611	default:
2612		break;
2613	}
2614	return (error);
2615}
2616
2617/* Perform post-issue checks on commands from userland. */
2618static void
2619mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2620{
2621	struct mfi_disk *ld, *ldn;
2622	struct mfi_system_pd *syspd = NULL;
2623	uint16_t syspd_id;
2624	uint16_t *mbox;
2625
2626	switch (cm->cm_frame->dcmd.opcode) {
2627	case MFI_DCMD_LD_DELETE:
2628		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2629			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2630				break;
2631		}
2632		KASSERT(ld != NULL, ("volume dissappeared"));
2633		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2634			mtx_unlock(&sc->mfi_io_lock);
2635			mtx_lock(&Giant);
2636			device_delete_child(sc->mfi_dev, ld->ld_dev);
2637			mtx_unlock(&Giant);
2638			mtx_lock(&sc->mfi_io_lock);
2639		} else
2640			mfi_disk_enable(ld);
2641		break;
2642	case MFI_DCMD_CFG_CLEAR:
2643		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2644			mtx_unlock(&sc->mfi_io_lock);
2645			mtx_lock(&Giant);
2646			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2647				device_delete_child(sc->mfi_dev, ld->ld_dev);
2648			}
2649			mtx_unlock(&Giant);
2650			mtx_lock(&sc->mfi_io_lock);
2651		} else {
2652			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2653				mfi_disk_enable(ld);
2654		}
2655		break;
2656	case MFI_DCMD_CFG_ADD:
2657		mfi_ldprobe(sc);
2658		break;
2659	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2660		mfi_ldprobe(sc);
2661		break;
2662	case MFI_DCMD_PD_STATE_SET:
2663		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2664		syspd_id = mbox[0];
2665		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2666			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2667				if (syspd->pd_id == syspd_id)
2668					break;
2669			}
2670		}
2671		else
2672			break;
2673		/* If the transition fails then enable the syspd again */
2674		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2675			mfi_syspd_enable(syspd);
2676		break;
2677	}
2678}
2679
2680	static int
	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2681{
2682		struct mfi_config_data *conf_data = (struct mfi_config_data *)cm->cm_data;
2683	struct mfi_command *ld_cm = NULL;
2684	struct mfi_ld_info *ld_info = NULL;
2685	int error = 0;
2686
2687	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2688	    (conf_data->ld[0].params.isSSCD == 1)) {
2689		error = 1;
2690	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2691		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2692		    (void **)&ld_info, sizeof(*ld_info));
2693		if (error) {
2694			device_printf(sc->mfi_dev, "Failed to allocate "
2695			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2696			if (ld_info)
2697				free(ld_info, M_MFIBUF);
2698			return (0);
2699		}
2700		ld_cm->cm_flags = MFI_CMD_DATAIN;
2701		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2702		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2703		if (mfi_wait_command(sc, ld_cm) != 0) {
2704			device_printf(sc->mfi_dev, "failed to get log drv\n");
2705			mfi_release_command(ld_cm);
2706			free(ld_info, M_MFIBUF);
2707			return (0);
2708		}
2709
2710		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2711			free(ld_info, M_MFIBUF);
2712			mfi_release_command(ld_cm);
2713			return (0);
2714		} else
2715			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2717
2718		if (ld_info->ld_config.params.isSSCD == 1)
2719			error = 1;
2720
2721		mfi_release_command(ld_cm);
2722		free(ld_info, M_MFIBUF);
2724		}
2725		return (error);
2726}
2727
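/*
 * For MFI_CMD_STP frames, copy each user iovec into a freshly
 * allocated DMA-able kernel buffer and point the frame's SG list
 * at the bounce buffers' bus addresses.
 */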
2728static int
2729	mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2730	{
2731		uint8_t i;
2732		struct mfi_ioc_packet *ioc = (struct mfi_ioc_packet *)arg;
2733		int sge_size, error;
2734		struct megasas_sge *kern_sge;
2736
2737	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2738	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2739	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2740
2741	if (sizeof(bus_addr_t) == 8) {
2742		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2743		cm->cm_extra_frames = 2;
2744		sge_size = sizeof(struct mfi_sg64);
2745	} else {
2746		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2747		sge_size = sizeof(struct mfi_sg32);
2748	}
2749
2750	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2751	for (i = 0; i < ioc->mfi_sge_count; i++) {
2752		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2753			1, 0,			/* algnmnt, boundary */
2754			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2755			BUS_SPACE_MAXADDR,	/* highaddr */
2756			NULL, NULL,		/* filter, filterarg */
2757			ioc->mfi_sgl[i].iov_len,/* maxsize */
2758			2,			/* nsegments */
2759			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2760			BUS_DMA_ALLOCNOW,	/* flags */
2761			NULL, NULL,		/* lockfunc, lockarg */
2762			&sc->mfi_kbuff_arr_dmat[i])) {
2763			device_printf(sc->mfi_dev,
2764			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2765			return (ENOMEM);
2766		}
2767
2768		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2769		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2770		    &sc->mfi_kbuff_arr_dmamap[i])) {
2771			device_printf(sc->mfi_dev,
2772			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2773			return (ENOMEM);
2774		}
2775
2776		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2777		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2778		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2779		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2780
2781		if (!sc->kbuff_arr[i]) {
2782			device_printf(sc->mfi_dev,
2783			    "Could not allocate memory for kbuff_arr info\n");
2784				return (-1);
2785		}
2786		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2787		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2788
2789		if (sizeof(bus_addr_t) == 8) {
2790			cm->cm_frame->stp.sgl.sg64[i].addr =
2791			    kern_sge[i].phys_addr;
2792			cm->cm_frame->stp.sgl.sg64[i].len =
2793			    ioc->mfi_sgl[i].iov_len;
2794		} else {
2795			cm->cm_frame->stp.sgl.sg32[i].addr =
2796			    kern_sge[i].phys_addr;
2797			cm->cm_frame->stp.sgl.sg32[i].len =
2798			    ioc->mfi_sgl[i].iov_len;
2799		}
2800
2801		error = copyin(ioc->mfi_sgl[i].iov_base,
2802		    sc->kbuff_arr[i],
2803		    ioc->mfi_sgl[i].iov_len);
2804		if (error != 0) {
2805			device_printf(sc->mfi_dev, "Copy in failed\n");
2806			return error;
2807		}
2808	}
2809
2810	cm->cm_flags |= MFI_CMD_MAPPED;
2811	return (0);
2812}
2813
2814static int
2815mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2816{
2817	struct mfi_command *cm;
2818	struct mfi_dcmd_frame *dcmd;
2819	void *ioc_buf = NULL;
2820	uint32_t context;
2821	int error = 0, locked;
2822
2823
2824	if (ioc->buf_size > 0) {
2825		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2826		if (ioc_buf == NULL) {
2827			return (ENOMEM);
2828		}
2829		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2830		if (error) {
2831			device_printf(sc->mfi_dev, "failed to copyin\n");
2832			free(ioc_buf, M_MFIBUF);
2833			return (error);
2834		}
2835	}
2836
2837	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2838
2839	mtx_lock(&sc->mfi_io_lock);
2840	while ((cm = mfi_dequeue_free(sc)) == NULL)
2841		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2842
2843	/* Save context for later */
2844	context = cm->cm_frame->header.context;
2845
2846	dcmd = &cm->cm_frame->dcmd;
2847	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2848
2849	cm->cm_sg = &dcmd->sgl;
2850	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2851	cm->cm_data = ioc_buf;
2852	cm->cm_len = ioc->buf_size;
2853
2854	/* restore context */
2855	cm->cm_frame->header.context = context;
2856
2857	/* Cheat since we don't know if we're writing or reading */
2858	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2859
2860	error = mfi_check_command_pre(sc, cm);
2861	if (error)
2862		goto out;
2863
2864	error = mfi_wait_command(sc, cm);
2865	if (error) {
2866		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2867		goto out;
2868	}
2869	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2870	mfi_check_command_post(sc, cm);
2871out:
2872	mfi_release_command(cm);
2873	mtx_unlock(&sc->mfi_io_lock);
2874	mfi_config_unlock(sc, locked);
2875	if (ioc->buf_size > 0)
2876		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2877	if (ioc_buf)
2878		free(ioc_buf, M_MFIBUF);
2879	return (error);
2880}
2881
2882#define	PTRIN(p)		((void *)(uintptr_t)(p))
2883
2884static int
2885mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2886{
2887	struct mfi_softc *sc;
2888	union mfi_statrequest *ms;
2889	struct mfi_ioc_packet *ioc;
2890#ifdef COMPAT_FREEBSD32
2891	struct mfi_ioc_packet32 *ioc32;
2892#endif
2893	struct mfi_ioc_aen *aen;
2894	struct mfi_command *cm = NULL;
2895	uint32_t context = 0;
2896	union mfi_sense_ptr sense_ptr;
2897	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2898	size_t len;
2899	int i, res;
2900	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2901#ifdef COMPAT_FREEBSD32
2902	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2903	struct mfi_ioc_passthru iop_swab;
2904#endif
2905	int error, locked;
2906	union mfi_sgl *sgl;
2907	sc = dev->si_drv1;
2908	error = 0;
2909
2910	if (sc->adpreset)
2911		return EBUSY;
2912
2913	if (sc->hw_crit_error)
2914		return EBUSY;
2915
2916	if (sc->issuepend_done == 0)
2917		return EBUSY;
2918
2919	switch (cmd) {
2920	case MFIIO_STATS:
2921		ms = (union mfi_statrequest *)arg;
2922		switch (ms->ms_item) {
2923		case MFIQ_FREE:
2924		case MFIQ_BIO:
2925		case MFIQ_READY:
2926		case MFIQ_BUSY:
2927			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2928			    sizeof(struct mfi_qstat));
2929			break;
2930		default:
2931			error = ENOIOCTL;
2932			break;
2933		}
2934		break;
2935	case MFIIO_QUERY_DISK:
2936	{
2937		struct mfi_query_disk *qd;
2938		struct mfi_disk *ld;
2939
2940		qd = (struct mfi_query_disk *)arg;
2941		mtx_lock(&sc->mfi_io_lock);
2942		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2943			if (ld->ld_id == qd->array_id)
2944				break;
2945		}
2946		if (ld == NULL) {
2947			qd->present = 0;
2948			mtx_unlock(&sc->mfi_io_lock);
2949			return (0);
2950		}
2951		qd->present = 1;
2952		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2953			qd->open = 1;
2954		bzero(qd->devname, SPECNAMELEN + 1);
2955		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2956		mtx_unlock(&sc->mfi_io_lock);
2957		break;
2958	}
2959	case MFI_CMD:
2960#ifdef COMPAT_FREEBSD32
2961	case MFI_CMD32:
2962#endif
2963		{
2964		devclass_t devclass;
2965		int adapter;
2966		ioc = (struct mfi_ioc_packet *)arg;
2967
2968		adapter = ioc->mfi_adapter_no;
2969		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2970			devclass = devclass_find("mfi");
2971			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
2972		}
2973		mtx_lock(&sc->mfi_io_lock);
2974		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2975			mtx_unlock(&sc->mfi_io_lock);
2976			return (EBUSY);
2977		}
2978		mtx_unlock(&sc->mfi_io_lock);
2979		locked = 0;
2980
2981		/*
2982		 * save off original context since copying from user
2983		 * will clobber some data
2984		 */
2985		context = cm->cm_frame->header.context;
2986		cm->cm_frame->header.context = cm->cm_index;
2987
2988		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2989		    2 * MEGAMFI_FRAME_SIZE);
2990		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2991		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2992		cm->cm_frame->header.scsi_status = 0;
2993		cm->cm_frame->header.pad0 = 0;
2994		if (ioc->mfi_sge_count) {
2995			cm->cm_sg =
2996			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2997		}
2998		sgl = cm->cm_sg;
2999		cm->cm_flags = 0;
3000		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3001			cm->cm_flags |= MFI_CMD_DATAIN;
3002		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3003			cm->cm_flags |= MFI_CMD_DATAOUT;
3004		/* Legacy app shim */
3005		if (cm->cm_flags == 0)
3006			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3007		cm->cm_len = cm->cm_frame->header.data_len;
3008		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3009#ifdef COMPAT_FREEBSD32
3010			if (cmd == MFI_CMD) {
3011#endif
3012				/* Native */
3013				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3014#ifdef COMPAT_FREEBSD32
3015			} else {
3016				/* 32bit on 64bit */
3017				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3018				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3019			}
3020#endif
3021			cm->cm_len += cm->cm_stp_len;
3022		}
3023		if (cm->cm_len &&
3024		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3025			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3026			    M_WAITOK | M_ZERO);
3027			if (cm->cm_data == NULL) {
3028				device_printf(sc->mfi_dev, "Malloc failed\n");
3029				goto out;
3030			}
3031		} else {
3032			cm->cm_data = NULL;
3033		}
3034
3035		/* restore header context */
3036		cm->cm_frame->header.context = context;
3037
3038		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3039			res = mfi_stp_cmd(sc, cm, arg);
3040			if (res != 0)
3041				goto out;
3042		} else {
3043			temp = data;
3044			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3045			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3046				for (i = 0; i < ioc->mfi_sge_count; i++) {
3047#ifdef COMPAT_FREEBSD32
3048					if (cmd == MFI_CMD) {
3049#endif
3050						/* Native */
3051						addr = ioc->mfi_sgl[i].iov_base;
3052						len = ioc->mfi_sgl[i].iov_len;
3053#ifdef COMPAT_FREEBSD32
3054					} else {
3055						/* 32bit on 64bit */
3056						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3057						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3058						len = ioc32->mfi_sgl[i].iov_len;
3059					}
3060#endif
3061					error = copyin(addr, temp, len);
3062					if (error != 0) {
3063						device_printf(sc->mfi_dev,
3064						    "Copy in failed\n");
3065						goto out;
3066					}
3067					temp = &temp[len];
3068				}
3069			}
3070		}
3071
3072		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3073			locked = mfi_config_lock(sc,
3074			     cm->cm_frame->dcmd.opcode);
3075
3076		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3077			cm->cm_frame->pass.sense_addr_lo =
3078			    (uint32_t)cm->cm_sense_busaddr;
3079			cm->cm_frame->pass.sense_addr_hi =
3080			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3081		}
3082		mtx_lock(&sc->mfi_io_lock);
3083		skip_pre_post = mfi_check_for_sscd(sc, cm);
3084		if (!skip_pre_post) {
3085			error = mfi_check_command_pre(sc, cm);
3086			if (error) {
3087				mtx_unlock(&sc->mfi_io_lock);
3088				goto out;
3089			}
3090		}
3091		if ((error = mfi_wait_command(sc, cm)) != 0) {
3092			device_printf(sc->mfi_dev,
3093			    "Controller command failed\n");
3094			mtx_unlock(&sc->mfi_io_lock);
3095			goto out;
3096		}
3097		if (!skip_pre_post) {
3098			mfi_check_command_post(sc, cm);
3099		}
3100		mtx_unlock(&sc->mfi_io_lock);
3101
3102		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3103			temp = data;
3104			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3105			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3106				for (i = 0; i < ioc->mfi_sge_count; i++) {
3107#ifdef COMPAT_FREEBSD32
3108					if (cmd == MFI_CMD) {
3109#endif
3110						/* Native */
3111						addr = ioc->mfi_sgl[i].iov_base;
3112						len = ioc->mfi_sgl[i].iov_len;
3113#ifdef COMPAT_FREEBSD32
3114					} else {
3115						/* 32bit on 64bit */
3116						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3117						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3118						len = ioc32->mfi_sgl[i].iov_len;
3119					}
3120#endif
3121					error = copyout(temp, addr, len);
3122					if (error != 0) {
3123						device_printf(sc->mfi_dev,
3124						    "Copy out failed\n");
3125						goto out;
3126					}
3127					temp = &temp[len];
3128				}
3129			}
3130		}
3131
3132		if (ioc->mfi_sense_len) {
3133			/* get user-space sense ptr then copy out sense */
3134			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3135			    &sense_ptr.sense_ptr_data[0],
3136			    sizeof(sense_ptr.sense_ptr_data));
3137#ifdef COMPAT_FREEBSD32
3138			if (cmd != MFI_CMD) {
3139				/*
3140				 * not 64bit native so zero out any address
3141				 * over 32bit */
3142				sense_ptr.addr.high = 0;
3143			}
3144#endif
3145			error = copyout(cm->cm_sense, sense_ptr.user_space,
3146			    ioc->mfi_sense_len);
3147			if (error != 0) {
3148				device_printf(sc->mfi_dev,
3149				    "Copy out failed\n");
3150				goto out;
3151			}
3152		}
3153
3154		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3155out:
3156		mfi_config_unlock(sc, locked);
3157		if (data)
3158			free(data, M_MFIBUF);
3159		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3160			for (i = 0; i < 2; i++) {
3161				if (sc->kbuff_arr[i]) {
3162					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3163						bus_dmamap_unload(
3164						    sc->mfi_kbuff_arr_dmat[i],
3165						    sc->mfi_kbuff_arr_dmamap[i]
3166						    );
3167					if (sc->kbuff_arr[i] != NULL)
3168						bus_dmamem_free(
3169						    sc->mfi_kbuff_arr_dmat[i],
3170						    sc->kbuff_arr[i],
3171						    sc->mfi_kbuff_arr_dmamap[i]
3172						    );
3173					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3174						bus_dma_tag_destroy(
3175						    sc->mfi_kbuff_arr_dmat[i]);
3176				}
3177			}
3178		}
3179		if (cm) {
3180			mtx_lock(&sc->mfi_io_lock);
3181			mfi_release_command(cm);
3182			mtx_unlock(&sc->mfi_io_lock);
3183		}
3184
3185		break;
3186		}
3187	case MFI_SET_AEN:
3188		aen = (struct mfi_ioc_aen *)arg;
3189		error = mfi_aen_register(sc, aen->aen_seq_num,
3190		    aen->aen_class_locale);
3191
3192		break;
3193	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3194		{
3195			devclass_t devclass;
3196			struct mfi_linux_ioc_packet l_ioc;
3197			int adapter;
3198
3199			devclass = devclass_find("mfi");
3200			if (devclass == NULL)
3201				return (ENOENT);
3202
3203			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3204			if (error)
3205				return (error);
3206			adapter = l_ioc.lioc_adapter_no;
3207			sc = devclass_get_softc(devclass, adapter);
3208			if (sc == NULL)
3209				return (ENOENT);
3210			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3211			    cmd, arg, flag, td));
3212			break;
3213		}
3214	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3215		{
3216			devclass_t devclass;
3217			struct mfi_linux_ioc_aen l_aen;
3218			int adapter;
3219
3220			devclass = devclass_find("mfi");
3221			if (devclass == NULL)
3222				return (ENOENT);
3223
3224			error = copyin(arg, &l_aen, sizeof(l_aen));
3225			if (error)
3226				return (error);
3227			adapter = l_aen.laen_adapter_no;
3228			sc = devclass_get_softc(devclass, adapter);
3229			if (sc == NULL)
3230				return (ENOENT);
3231			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3232			    cmd, arg, flag, td));
3233			break;
3234		}
3235#ifdef COMPAT_FREEBSD32
3236	case MFIIO_PASSTHRU32:
3237		iop_swab.ioc_frame	= iop32->ioc_frame;
3238		iop_swab.buf_size	= iop32->buf_size;
3239		iop_swab.buf		= PTRIN(iop32->buf);
3240		iop			= &iop_swab;
3241		/* FALLTHROUGH */
3242#endif
3243	case MFIIO_PASSTHRU:
3244		error = mfi_user_command(sc, iop);
3245#ifdef COMPAT_FREEBSD32
3246		if (cmd == MFIIO_PASSTHRU32)
3247			iop32->ioc_frame = iop_swab.ioc_frame;
3248#endif
3249		break;
3250	default:
3251		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3252		error = ENOENT;
3253		break;
3254	}
3255
3256	return (error);
3257}
3258
3259static int
3260mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3261{
3262	struct mfi_softc *sc;
3263	struct mfi_linux_ioc_packet l_ioc;
3264	struct mfi_linux_ioc_aen l_aen;
3265	struct mfi_command *cm = NULL;
3266	struct mfi_aen *mfi_aen_entry;
3267	union mfi_sense_ptr sense_ptr;
3268	uint32_t context = 0;
3269	uint8_t *data = NULL, *temp;
3270	int i;
3271	int error, locked;
3272
3273	sc = dev->si_drv1;
3274	error = 0;
3275	switch (cmd) {
3276	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3277		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3278		if (error != 0)
3279			return (error);
3280
3281		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3282			return (EINVAL);
3283		}
3284
3285		mtx_lock(&sc->mfi_io_lock);
3286		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3287			mtx_unlock(&sc->mfi_io_lock);
3288			return (EBUSY);
3289		}
3290		mtx_unlock(&sc->mfi_io_lock);
3291		locked = 0;
3292
3293		/*
3294		 * save off original context since copying from user
3295		 * will clobber some data
3296		 */
3297		context = cm->cm_frame->header.context;
3298
3299		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3300		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3301		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3302		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3303		cm->cm_frame->header.scsi_status = 0;
3304		cm->cm_frame->header.pad0 = 0;
3305		if (l_ioc.lioc_sge_count)
3306			cm->cm_sg =
3307			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3308		cm->cm_flags = 0;
3309		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3310			cm->cm_flags |= MFI_CMD_DATAIN;
3311		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3312			cm->cm_flags |= MFI_CMD_DATAOUT;
3313		cm->cm_len = cm->cm_frame->header.data_len;
3314		if (cm->cm_len &&
3315		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3316			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3317			    M_WAITOK | M_ZERO);
3318			if (cm->cm_data == NULL) {
3319				device_printf(sc->mfi_dev, "Malloc failed\n");
3320				goto out;
3321			}
3322		} else {
3323			cm->cm_data = NULL;
3324		}
3325
3326		/* restore header context */
3327		cm->cm_frame->header.context = context;
3328
3329		temp = data;
3330		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3331			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3332				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3333				       temp,
3334				       l_ioc.lioc_sgl[i].iov_len);
3335				if (error != 0) {
3336					device_printf(sc->mfi_dev,
3337					    "Copy in failed\n");
3338					goto out;
3339				}
3340				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3341			}
3342		}
3343
3344		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3345			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3346
3347		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3348			cm->cm_frame->pass.sense_addr_lo =
3349			    (uint32_t)cm->cm_sense_busaddr;
3350			cm->cm_frame->pass.sense_addr_hi =
3351			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3352		}
3353
3354		mtx_lock(&sc->mfi_io_lock);
3355		error = mfi_check_command_pre(sc, cm);
3356		if (error) {
3357			mtx_unlock(&sc->mfi_io_lock);
3358			goto out;
3359		}
3360
3361		if ((error = mfi_wait_command(sc, cm)) != 0) {
3362			device_printf(sc->mfi_dev,
3363			    "Controller command failed\n");
3364			mtx_unlock(&sc->mfi_io_lock);
3365			goto out;
3366		}
3367
3368		mfi_check_command_post(sc, cm);
3369		mtx_unlock(&sc->mfi_io_lock);
3370
3371		temp = data;
3372		if (cm->cm_flags & MFI_CMD_DATAIN) {
3373			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3374				error = copyout(temp,
3375					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3376					l_ioc.lioc_sgl[i].iov_len);
3377				if (error != 0) {
3378					device_printf(sc->mfi_dev,
3379					    "Copy out failed\n");
3380					goto out;
3381				}
3382				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3383			}
3384		}
3385
3386		if (l_ioc.lioc_sense_len) {
3387			/* get user-space sense ptr then copy out sense */
3388			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3389                            ->lioc_frame.raw[l_ioc.lioc_sense_off],
3390			    &sense_ptr.sense_ptr_data[0],
3391			    sizeof(sense_ptr.sense_ptr_data));
3392#ifdef __amd64__
3393			/*
3394			 * only 32bit Linux support so zero out any
3395			 * address over 32bit
3396			 */
3397			sense_ptr.addr.high = 0;
3398#endif
3399			error = copyout(cm->cm_sense, sense_ptr.user_space,
3400			    l_ioc.lioc_sense_len);
3401			if (error != 0) {
3402				device_printf(sc->mfi_dev,
3403				    "Copy out failed\n");
3404				goto out;
3405			}
3406		}
3407
3408		error = copyout(&cm->cm_frame->header.cmd_status,
3409			&((struct mfi_linux_ioc_packet*)arg)
3410			->lioc_frame.hdr.cmd_status,
3411			1);
3412		if (error != 0) {
3413			device_printf(sc->mfi_dev,
3414				      "Copy out failed\n");
3415			goto out;
3416		}
3417
3418out:
3419		mfi_config_unlock(sc, locked);
3420		if (data)
3421			free(data, M_MFIBUF);
3422		if (cm) {
3423			mtx_lock(&sc->mfi_io_lock);
3424			mfi_release_command(cm);
3425			mtx_unlock(&sc->mfi_io_lock);
3426		}
3427
3428		return (error);
3429	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3430		error = copyin(arg, &l_aen, sizeof(l_aen));
3431		if (error != 0)
3432			return (error);
3433		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3434		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3435		    M_WAITOK);
3436		mtx_lock(&sc->mfi_io_lock);
3437		if (mfi_aen_entry != NULL) {
3438			mfi_aen_entry->p = curproc;
3439			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3440			    aen_link);
3441		}
3442		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3443		    l_aen.laen_class_locale);
3444
3445		if (error != 0) {
3446			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3447			    aen_link);
3448			free(mfi_aen_entry, M_MFIBUF);
3449		}
3450		mtx_unlock(&sc->mfi_io_lock);
3451
3452		return (error);
3453	default:
3454		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3455		error = ENOENT;
3456		break;
3457	}
3458
3459	return (error);
3460}
3461
3462static int
3463mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3464{
3465	struct mfi_softc *sc;
3466	int revents = 0;
3467
3468	sc = dev->si_drv1;
3469
3470	if (poll_events & (POLLIN | POLLRDNORM)) {
3471		if (sc->mfi_aen_triggered != 0) {
3472			revents |= poll_events & (POLLIN | POLLRDNORM);
3473			sc->mfi_aen_triggered = 0;
3474		}
3475		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3476			revents |= POLLERR;
3477		}
3478	}
3479
3480	if (revents == 0) {
3481		if (poll_events & (POLLIN | POLLRDNORM)) {
3482			sc->mfi_poll_waiting = 1;
3483			selrecord(td, &sc->mfi_select);
3484		}
3485	}
3486
3487	return (revents);
3488}
3489
3490static void
3491mfi_dump_all(void)
3492{
3493	struct mfi_softc *sc;
3494	struct mfi_command *cm;
3495	devclass_t dc;
3496	time_t deadline;
3497	int timedout;
3498	int i;
3499
3500	dc = devclass_find("mfi");
3501	if (dc == NULL) {
3502		printf("No mfi dev class\n");
3503		return;
3504	}
3505
3506	for (i = 0; ; i++) {
3507		sc = devclass_get_softc(dc, i);
3508		if (sc == NULL)
3509			break;
3510		device_printf(sc->mfi_dev, "Dumping\n\n");
3511		timedout = 0;
3512		deadline = time_uptime - MFI_CMD_TIMEOUT;
3513		mtx_lock(&sc->mfi_io_lock);
3514		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3515			if (cm->cm_timestamp < deadline) {
3516				device_printf(sc->mfi_dev,
3517				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3518				    cm, (int)(time_uptime - cm->cm_timestamp));
3519				MFI_PRINT_CMD(cm);
3520				timedout++;
3521			}
3522		}
3523
3524#if 0
3525		if (timedout)
3526			MFI_DUMP_CMDS(SC);
3527#endif
3528
3529		mtx_unlock(&sc->mfi_io_lock);
3530	}
3531
3532	return;
3533}
3534
3535static void
3536mfi_timeout(void *data)
3537{
3538	struct mfi_softc *sc = (struct mfi_softc *)data;
3539	struct mfi_command *cm;
3540	time_t deadline;
3541	int timedout = 0;
3542
3543	deadline = time_uptime - MFI_CMD_TIMEOUT;
3544	if (sc->adpreset == 0) {
3545		if (!mfi_tbolt_reset(sc)) {
3546			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3547			return;
3548		}
3549	}
3550	mtx_lock(&sc->mfi_io_lock);
3551	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3552		if (sc->mfi_aen_cm == cm)
3553			continue;
3554		if (cm->cm_timestamp < deadline) {
3555			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3556				cm->cm_timestamp = time_uptime;
3557			} else {
3558				device_printf(sc->mfi_dev,
3559				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3560				     cm, (int)(time_uptime - cm->cm_timestamp)
3561				     );
3562				MFI_PRINT_CMD(cm);
3563				MFI_VALIDATE_CMD(sc, cm);
3564				timedout++;
3565			}
3566		}
3567	}
3568
3569#if 0
3570	if (timedout)
3571		MFI_DUMP_CMDS(SC);
3572#endif
3573
3574	mtx_unlock(&sc->mfi_io_lock);
3575
3576	callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3577	    mfi_timeout, sc);
3578
3579	if (0)
3580		mfi_dump_all();
3581	return;
3582}
3583