1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/*-
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 *    notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 *    notice, this list of conditions and the following disclaimer in the
38 *    documentation and/or other materials provided with the distribution.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#include <sys/cdefs.h>
54__FBSDID("$FreeBSD: releng/10.3/sys/dev/mfi/mfi.c 284429 2015-06-15 21:08:08Z ambrisko $");
55
56#include "opt_compat.h"
57#include "opt_mfi.h"
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/sysctl.h>
62#include <sys/malloc.h>
63#include <sys/kernel.h>
64#include <sys/poll.h>
65#include <sys/selinfo.h>
66#include <sys/bus.h>
67#include <sys/conf.h>
68#include <sys/eventhandler.h>
69#include <sys/rman.h>
70#include <sys/bus_dma.h>
71#include <sys/bio.h>
72#include <sys/ioccom.h>
73#include <sys/uio.h>
74#include <sys/proc.h>
75#include <sys/signalvar.h>
76#include <sys/sysent.h>
77#include <sys/taskqueue.h>
78
79#include <machine/bus.h>
80#include <machine/resource.h>
81
82#include <dev/mfi/mfireg.h>
83#include <dev/mfi/mfi_ioctl.h>
84#include <dev/mfi/mfivar.h>
85#include <sys/interrupt.h>
86#include <sys/priority.h>
87
88static int	mfi_alloc_commands(struct mfi_softc *);
89static int	mfi_comms_init(struct mfi_softc *);
90static int	mfi_get_controller_info(struct mfi_softc *);
91static int	mfi_get_log_state(struct mfi_softc *,
92		    struct mfi_evt_log_state **);
93static int	mfi_parse_entries(struct mfi_softc *, int, int);
94static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95static void	mfi_startup(void *arg);
96static void	mfi_intr(void *arg);
97static void	mfi_ldprobe(struct mfi_softc *sc);
98static void	mfi_syspdprobe(struct mfi_softc *sc);
99static void	mfi_handle_evt(void *context, int pending);
100static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101static void	mfi_aen_complete(struct mfi_command *);
102static int	mfi_add_ld(struct mfi_softc *sc, int);
103static void	mfi_add_ld_complete(struct mfi_command *);
104static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
105static void	mfi_add_sys_pd_complete(struct mfi_command *);
106static struct mfi_command * mfi_bio_command(struct mfi_softc *);
107static void	mfi_bio_complete(struct mfi_command *);
108static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
109static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
110static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
113static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114static void	mfi_timeout(void *);
115static int	mfi_user_command(struct mfi_softc *,
116		    struct mfi_ioc_passthru *);
117static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
118static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
119static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
120static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
121static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124		    uint32_t frame_cnt);
125static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126		    uint32_t frame_cnt);
127static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
132
133SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137           0, "event message locale");
138
139static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
142           0, "event message class");
143
144static int	mfi_max_cmds = 128;
145TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
147	   0, "Max commands limit (-1 = controller limit)");
148
149static int	mfi_detect_jbod_change = 1;
150TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
152	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153
154int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
155TUNABLE_INT("hw.mfi.polled_cmd_timeout", &mfi_polled_cmd_timeout);
156SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
157	   &mfi_polled_cmd_timeout, 0,
158	   "Polled command timeout - used for firmware flash etc (in seconds)");
159
160static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
161TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
162SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
163	   0, "Command timeout (in seconds)");
164
165/* Management interface */
166static d_open_t		mfi_open;
167static d_close_t	mfi_close;
168static d_ioctl_t	mfi_ioctl;
169static d_poll_t		mfi_poll;
170
171static struct cdevsw mfi_cdevsw = {
172	.d_version = 	D_VERSION,
173	.d_flags =	0,
174	.d_open = 	mfi_open,
175	.d_close =	mfi_close,
176	.d_ioctl =	mfi_ioctl,
177	.d_poll =	mfi_poll,
178	.d_name =	"mfi",
179};
180
181MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
182
183#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
184struct mfi_skinny_dma_info mfi_skinny;
185
186static void
187mfi_enable_intr_xscale(struct mfi_softc *sc)
188{
189	MFI_WRITE4(sc, MFI_OMSK, 0x01);
190}
191
192static void
193mfi_enable_intr_ppc(struct mfi_softc *sc)
194{
195	if (sc->mfi_flags & MFI_FLAGS_1078) {
196		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
198	}
199	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
200		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
201		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
202	}
203	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
204		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
205	}
206}
207
208static int32_t
209mfi_read_fw_status_xscale(struct mfi_softc *sc)
210{
211	return MFI_READ4(sc, MFI_OMSG0);
212}
213
214static int32_t
215mfi_read_fw_status_ppc(struct mfi_softc *sc)
216{
217	return MFI_READ4(sc, MFI_OSP0);
218}
219
220static int
221mfi_check_clear_intr_xscale(struct mfi_softc *sc)
222{
223	int32_t status;
224
225	status = MFI_READ4(sc, MFI_OSTS);
226	if ((status & MFI_OSTS_INTR_VALID) == 0)
227		return 1;
228
229	MFI_WRITE4(sc, MFI_OSTS, status);
230	return 0;
231}
232
233static int
234mfi_check_clear_intr_ppc(struct mfi_softc *sc)
235{
236	int32_t status;
237
238	status = MFI_READ4(sc, MFI_OSTS);
239	if (sc->mfi_flags & MFI_FLAGS_1078) {
240		if (!(status & MFI_1078_RM)) {
241			return 1;
242		}
243	}
244	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
245		if (!(status & MFI_GEN2_RM)) {
246			return 1;
247		}
248	}
249	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
250		if (!(status & MFI_SKINNY_RM)) {
251			return 1;
252		}
253	}
254	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
255		MFI_WRITE4(sc, MFI_OSTS, status);
256	else
257		MFI_WRITE4(sc, MFI_ODCR0, status);
258	return 0;
259}
260
261static void
262mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
263{
264	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
265}
266
267static void
268mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
269{
270	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
271	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
272	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
273	} else {
274	    MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
275	}
276}
277
278int
279mfi_transition_firmware(struct mfi_softc *sc)
280{
281	uint32_t fw_state, cur_state;
282	int max_wait, i;
283	uint32_t cur_abs_reg_val = 0;
284	uint32_t prev_abs_reg_val = 0;
285
286	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
287	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
288	while (fw_state != MFI_FWSTATE_READY) {
289		if (bootverbose)
290			device_printf(sc->mfi_dev, "Waiting for firmware to "
291			"become ready\n");
292		cur_state = fw_state;
293		switch (fw_state) {
294		case MFI_FWSTATE_FAULT:
295			device_printf(sc->mfi_dev, "Firmware fault\n");
296			return (ENXIO);
297		case MFI_FWSTATE_WAIT_HANDSHAKE:
298			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
299			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
300			else
301			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
302			max_wait = MFI_RESET_WAIT_TIME;
303			break;
304		case MFI_FWSTATE_OPERATIONAL:
305			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
306			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
307			else
308			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
309			max_wait = MFI_RESET_WAIT_TIME;
310			break;
311		case MFI_FWSTATE_UNDEFINED:
312		case MFI_FWSTATE_BB_INIT:
313			max_wait = MFI_RESET_WAIT_TIME;
314			break;
315		case MFI_FWSTATE_FW_INIT_2:
316			max_wait = MFI_RESET_WAIT_TIME;
317			break;
318		case MFI_FWSTATE_FW_INIT:
319		case MFI_FWSTATE_FLUSH_CACHE:
320			max_wait = MFI_RESET_WAIT_TIME;
321			break;
322		case MFI_FWSTATE_DEVICE_SCAN:
323			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
324			prev_abs_reg_val = cur_abs_reg_val;
325			break;
326		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
327			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
328			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
329			else
330			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
331			max_wait = MFI_RESET_WAIT_TIME;
332			break;
333		default:
334			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
335			    fw_state);
336			return (ENXIO);
337		}
338		for (i = 0; i < (max_wait * 10); i++) {
339			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
340			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
341			if (fw_state == cur_state)
342				DELAY(100000);
343			else
344				break;
345		}
346		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
347			/* Check the device scanning progress */
348			if (prev_abs_reg_val != cur_abs_reg_val) {
349				continue;
350			}
351		}
352		if (fw_state == cur_state) {
353			device_printf(sc->mfi_dev, "Firmware stuck in state "
354			    "%#x\n", fw_state);
355			return (ENXIO);
356		}
357	}
358	return (0);
359}
360
361static void
362mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
363{
364	bus_addr_t *addr;
365
366	addr = arg;
367	*addr = segs[0].ds_addr;
368}
369
370
371int
372mfi_attach(struct mfi_softc *sc)
373{
374	uint32_t status;
375	int error, commsz, framessz, sensesz;
376	int frames, unit, max_fw_sge, max_fw_cmds;
377	uint32_t tb_mem_size = 0;
378	struct cdev *dev_t;
379
380	if (sc == NULL)
381		return EINVAL;
382
383	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
384	    MEGASAS_VERSION);
385
386	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
387	sx_init(&sc->mfi_config_lock, "MFI config");
388	TAILQ_INIT(&sc->mfi_ld_tqh);
389	TAILQ_INIT(&sc->mfi_syspd_tqh);
390	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
391	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
392	TAILQ_INIT(&sc->mfi_evt_queue);
393	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
394	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
395	TAILQ_INIT(&sc->mfi_aen_pids);
396	TAILQ_INIT(&sc->mfi_cam_ccbq);
397
398	mfi_initq_free(sc);
399	mfi_initq_ready(sc);
400	mfi_initq_busy(sc);
401	mfi_initq_bio(sc);
402
403	sc->adpreset = 0;
404	sc->last_seq_num = 0;
405	sc->disableOnlineCtrlReset = 1;
406	sc->issuepend_done = 1;
407	sc->hw_crit_error = 0;
408
409	if (sc->mfi_flags & MFI_FLAGS_1064R) {
410		sc->mfi_enable_intr = mfi_enable_intr_xscale;
411		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
412		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
413		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
414	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
415		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
416		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
417		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
418		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
419		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
420		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
421		sc->mfi_tbolt = 1;
422		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
423	} else {
424		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
425		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
426		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
427		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
428	}
429
430
431	/* Before we get too far, see if the firmware is working */
432	if ((error = mfi_transition_firmware(sc)) != 0) {
433		device_printf(sc->mfi_dev, "Firmware not in READY state, "
434		    "error %d\n", error);
435		return (ENXIO);
436	}
437
438	/* Start: LSIP200113393 */
439	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
440				1, 0,			/* algnmnt, boundary */
441				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
442				BUS_SPACE_MAXADDR,	/* highaddr */
443				NULL, NULL,		/* filter, filterarg */
444				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
445				1,			/* msegments */
446				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
447				0,			/* flags */
448				NULL, NULL,		/* lockfunc, lockarg */
449				&sc->verbuf_h_dmat)) {
450		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
451		return (ENOMEM);
452	}
453	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
454	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
455		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
456		return (ENOMEM);
457	}
458	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
459	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
460	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
461	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
462	/* End: LSIP200113393 */
463
464	/*
465	 * Get information needed for sizing the contiguous memory for the
466	 * frame pool.  Size down the sgl parameter since we know that
467	 * we will never need more than what's required for MAXPHYS.
468	 * It would be nice if these constants were available at runtime
469	 * instead of compile time.
470	 */
471	status = sc->mfi_read_fw_status(sc);
472	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
473	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
474		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
475		    max_fw_cmds, mfi_max_cmds);
476		sc->mfi_max_fw_cmds = mfi_max_cmds;
477	} else {
478		sc->mfi_max_fw_cmds = max_fw_cmds;
479	}
480	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
481	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
482
483	/* ThunderBolt Support get the contiguous memory */
484
485	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
486		mfi_tbolt_init_globals(sc);
487		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
488		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
489		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
490		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
491
492		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
493				1, 0,			/* algnmnt, boundary */
494				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
495				BUS_SPACE_MAXADDR,	/* highaddr */
496				NULL, NULL,		/* filter, filterarg */
497				tb_mem_size,		/* maxsize */
498				1,			/* msegments */
499				tb_mem_size,		/* maxsegsize */
500				0,			/* flags */
501				NULL, NULL,		/* lockfunc, lockarg */
502				&sc->mfi_tb_dmat)) {
503			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
504			return (ENOMEM);
505		}
506		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
507		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
508			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
509			return (ENOMEM);
510		}
511		bzero(sc->request_message_pool, tb_mem_size);
512		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
513		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
514
515		/* For ThunderBolt memory init */
516		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
517				0x100, 0,		/* alignmnt, boundary */
518				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
519				BUS_SPACE_MAXADDR,	/* highaddr */
520				NULL, NULL,		/* filter, filterarg */
521				MFI_FRAME_SIZE,		/* maxsize */
522				1,			/* msegments */
523				MFI_FRAME_SIZE,		/* maxsegsize */
524				0,			/* flags */
525				NULL, NULL,		/* lockfunc, lockarg */
526				&sc->mfi_tb_init_dmat)) {
527			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
528			return (ENOMEM);
529		}
530		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
531		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
532			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
533			return (ENOMEM);
534		}
535		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
536		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
537		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
538		    &sc->mfi_tb_init_busaddr, 0);
539		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
540		    tb_mem_size)) {
541			device_printf(sc->mfi_dev,
542			    "Thunderbolt pool preparation error\n");
543			return 0;
544		}
545
546		/*
547		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
548		  we are taking it diffrent from what we have allocated for Request
549		  and reply descriptors to avoid confusion later
550		*/
551		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
552		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
553				1, 0,			/* algnmnt, boundary */
554				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
555				BUS_SPACE_MAXADDR,	/* highaddr */
556				NULL, NULL,		/* filter, filterarg */
557				tb_mem_size,		/* maxsize */
558				1,			/* msegments */
559				tb_mem_size,		/* maxsegsize */
560				0,			/* flags */
561				NULL, NULL,		/* lockfunc, lockarg */
562				&sc->mfi_tb_ioc_init_dmat)) {
563			device_printf(sc->mfi_dev,
564			    "Cannot allocate comms DMA tag\n");
565			return (ENOMEM);
566		}
567		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
568		    (void **)&sc->mfi_tb_ioc_init_desc,
569		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
570			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
571			return (ENOMEM);
572		}
573		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
574		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
575		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
576		    &sc->mfi_tb_ioc_init_busaddr, 0);
577	}
578	/*
579	 * Create the dma tag for data buffers.  Used both for block I/O
580	 * and for various internal data queries.
581	 */
582	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
583				1, 0,			/* algnmnt, boundary */
584				BUS_SPACE_MAXADDR,	/* lowaddr */
585				BUS_SPACE_MAXADDR,	/* highaddr */
586				NULL, NULL,		/* filter, filterarg */
587				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
588				sc->mfi_max_sge,	/* nsegments */
589				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
590				BUS_DMA_ALLOCNOW,	/* flags */
591				busdma_lock_mutex,	/* lockfunc */
592				&sc->mfi_io_lock,	/* lockfuncarg */
593				&sc->mfi_buffer_dmat)) {
594		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
595		return (ENOMEM);
596	}
597
598	/*
599	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
600	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
601	 * entry, so the calculated size here will be will be 1 more than
602	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
603	 */
604	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
605	    sizeof(struct mfi_hwcomms);
606	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
607				1, 0,			/* algnmnt, boundary */
608				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
609				BUS_SPACE_MAXADDR,	/* highaddr */
610				NULL, NULL,		/* filter, filterarg */
611				commsz,			/* maxsize */
612				1,			/* msegments */
613				commsz,			/* maxsegsize */
614				0,			/* flags */
615				NULL, NULL,		/* lockfunc, lockarg */
616				&sc->mfi_comms_dmat)) {
617		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
618		return (ENOMEM);
619	}
620	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
621	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
622		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
623		return (ENOMEM);
624	}
625	bzero(sc->mfi_comms, commsz);
626	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
627	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
628	/*
629	 * Allocate DMA memory for the command frames.  Keep them in the
630	 * lower 4GB for efficiency.  Calculate the size of the commands at
631	 * the same time; each command is one 64 byte frame plus a set of
632         * additional frames for holding sg lists or other data.
633	 * The assumption here is that the SG list will start at the second
634	 * frame and not use the unused bytes in the first frame.  While this
635	 * isn't technically correct, it simplifies the calculation and allows
636	 * for command frames that might be larger than an mfi_io_frame.
637	 */
638	if (sizeof(bus_addr_t) == 8) {
639		sc->mfi_sge_size = sizeof(struct mfi_sg64);
640		sc->mfi_flags |= MFI_FLAGS_SG64;
641	} else {
642		sc->mfi_sge_size = sizeof(struct mfi_sg32);
643	}
644	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
645		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
646	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
647	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
648	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
649	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
650				64, 0,			/* algnmnt, boundary */
651				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
652				BUS_SPACE_MAXADDR,	/* highaddr */
653				NULL, NULL,		/* filter, filterarg */
654				framessz,		/* maxsize */
655				1,			/* nsegments */
656				framessz,		/* maxsegsize */
657				0,			/* flags */
658				NULL, NULL,		/* lockfunc, lockarg */
659				&sc->mfi_frames_dmat)) {
660		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
661		return (ENOMEM);
662	}
663	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
664	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
665		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
666		return (ENOMEM);
667	}
668	bzero(sc->mfi_frames, framessz);
669	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
670	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
671	/*
672	 * Allocate DMA memory for the frame sense data.  Keep them in the
673	 * lower 4GB for efficiency
674	 */
675	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
676	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
677				4, 0,			/* algnmnt, boundary */
678				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
679				BUS_SPACE_MAXADDR,	/* highaddr */
680				NULL, NULL,		/* filter, filterarg */
681				sensesz,		/* maxsize */
682				1,			/* nsegments */
683				sensesz,		/* maxsegsize */
684				0,			/* flags */
685				NULL, NULL,		/* lockfunc, lockarg */
686				&sc->mfi_sense_dmat)) {
687		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
688		return (ENOMEM);
689	}
690	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
691	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
692		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
693		return (ENOMEM);
694	}
695	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
696	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
697	if ((error = mfi_alloc_commands(sc)) != 0)
698		return (error);
699
700	/* Before moving the FW to operational state, check whether
701	 * hostmemory is required by the FW or not
702	 */
703
704	/* ThunderBolt MFI_IOC2 INIT */
705	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
706		sc->mfi_disable_intr(sc);
707		mtx_lock(&sc->mfi_io_lock);
708		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
709			device_printf(sc->mfi_dev,
710			    "TB Init has failed with error %d\n",error);
711			mtx_unlock(&sc->mfi_io_lock);
712			return error;
713		}
714		mtx_unlock(&sc->mfi_io_lock);
715
716		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
717			return error;
718		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
719		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
720		    &sc->mfi_intr)) {
721			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
722			return (EINVAL);
723		}
724		sc->mfi_intr_ptr = mfi_intr_tbolt;
725		sc->mfi_enable_intr(sc);
726	} else {
727		if ((error = mfi_comms_init(sc)) != 0)
728			return (error);
729
730		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
731		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
732			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
733			return (EINVAL);
734		}
735		sc->mfi_intr_ptr = mfi_intr;
736		sc->mfi_enable_intr(sc);
737	}
738	if ((error = mfi_get_controller_info(sc)) != 0)
739		return (error);
740	sc->disableOnlineCtrlReset = 0;
741
742	/* Register a config hook to probe the bus for arrays */
743	sc->mfi_ich.ich_func = mfi_startup;
744	sc->mfi_ich.ich_arg = sc;
745	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
746		device_printf(sc->mfi_dev, "Cannot establish configuration "
747		    "hook\n");
748		return (EINVAL);
749	}
750	mtx_lock(&sc->mfi_io_lock);
751	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
752		mtx_unlock(&sc->mfi_io_lock);
753		return (error);
754	}
755	mtx_unlock(&sc->mfi_io_lock);
756
757	/*
758	 * Register a shutdown handler.
759	 */
760	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
761	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
762		device_printf(sc->mfi_dev, "Warning: shutdown event "
763		    "registration failed\n");
764	}
765
766	/*
767	 * Create the control device for doing management
768	 */
769	unit = device_get_unit(sc->mfi_dev);
770	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
771	    0640, "mfi%d", unit);
772	if (unit == 0)
773		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
774		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
775	if (sc->mfi_cdev != NULL)
776		sc->mfi_cdev->si_drv1 = sc;
777	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
778	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
779	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
780	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
781	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
782	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
783	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
784	    &sc->mfi_keep_deleted_volumes, 0,
785	    "Don't detach the mfid device for a busy volume that is deleted");
786
787	device_add_child(sc->mfi_dev, "mfip", -1);
788	bus_generic_attach(sc->mfi_dev);
789
790	/* Start the timeout watchdog */
791	callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
792	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
793	    mfi_timeout, sc);
794
795	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
796		mtx_lock(&sc->mfi_io_lock);
797		mfi_tbolt_sync_map_info(sc);
798		mtx_unlock(&sc->mfi_io_lock);
799	}
800
801	return (0);
802}
803
804static int
805mfi_alloc_commands(struct mfi_softc *sc)
806{
807	struct mfi_command *cm;
808	int i, j;
809
810	/*
811	 * XXX Should we allocate all the commands up front, or allocate on
812	 * demand later like 'aac' does?
813	 */
814	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
815	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
816
817	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
818		cm = &sc->mfi_commands[i];
819		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
820		    sc->mfi_cmd_size * i);
821		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
822		    sc->mfi_cmd_size * i;
823		cm->cm_frame->header.context = i;
824		cm->cm_sense = &sc->mfi_sense[i];
825		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
826		cm->cm_sc = sc;
827		cm->cm_index = i;
828		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
829		    &cm->cm_dmamap) == 0) {
830			mtx_lock(&sc->mfi_io_lock);
831			mfi_release_command(cm);
832			mtx_unlock(&sc->mfi_io_lock);
833		} else {
834			device_printf(sc->mfi_dev, "Failed to allocate %d "
835			   "command blocks, only allocated %d\n",
836			    sc->mfi_max_fw_cmds, i - 1);
837			for (j = 0; j < i; j++) {
838				cm = &sc->mfi_commands[i];
839				bus_dmamap_destroy(sc->mfi_buffer_dmat,
840				    cm->cm_dmamap);
841			}
842			free(sc->mfi_commands, M_MFIBUF);
843			sc->mfi_commands = NULL;
844
845			return (ENOMEM);
846		}
847	}
848
849	return (0);
850}
851
852void
853mfi_release_command(struct mfi_command *cm)
854{
855	struct mfi_frame_header *hdr;
856	uint32_t *hdr_data;
857
858	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
859
860	/*
861	 * Zero out the important fields of the frame, but make sure the
862	 * context field is preserved.  For efficiency, handle the fields
863	 * as 32 bit words.  Clear out the first S/G entry too for safety.
864	 */
865	hdr = &cm->cm_frame->header;
866	if (cm->cm_data != NULL && hdr->sg_count) {
867		cm->cm_sg->sg32[0].len = 0;
868		cm->cm_sg->sg32[0].addr = 0;
869	}
870
871	/*
872	 * Command may be on other queues e.g. busy queue depending on the
873	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
874	 * properly
875	 */
876	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
877		mfi_remove_busy(cm);
878	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
879		mfi_remove_ready(cm);
880
881	/* We're not expecting it to be on any other queue but check */
882	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
883		panic("Command %p is still on another queue, flags = %#x",
884		    cm, cm->cm_flags);
885	}
886
887	/* tbolt cleanup */
888	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
889		mfi_tbolt_return_cmd(cm->cm_sc,
890		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
891		    cm);
892	}
893
894	hdr_data = (uint32_t *)cm->cm_frame;
895	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
896	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
897	hdr_data[4] = 0;	/* flags, timeout */
898	hdr_data[5] = 0;	/* data_len */
899
900	cm->cm_extra_frames = 0;
901	cm->cm_flags = 0;
902	cm->cm_complete = NULL;
903	cm->cm_private = NULL;
904	cm->cm_data = NULL;
905	cm->cm_sg = 0;
906	cm->cm_total_frame_size = 0;
907	cm->retry_for_fw_reset = 0;
908
909	mfi_enqueue_free(cm);
910}
911
912int
913mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
914    uint32_t opcode, void **bufp, size_t bufsize)
915{
916	struct mfi_command *cm;
917	struct mfi_dcmd_frame *dcmd;
918	void *buf = NULL;
919	uint32_t context = 0;
920
921	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
922
923	cm = mfi_dequeue_free(sc);
924	if (cm == NULL)
925		return (EBUSY);
926
927	/* Zero out the MFI frame */
928	context = cm->cm_frame->header.context;
929	bzero(cm->cm_frame, sizeof(union mfi_frame));
930	cm->cm_frame->header.context = context;
931
932	if ((bufsize > 0) && (bufp != NULL)) {
933		if (*bufp == NULL) {
934			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
935			if (buf == NULL) {
936				mfi_release_command(cm);
937				return (ENOMEM);
938			}
939			*bufp = buf;
940		} else {
941			buf = *bufp;
942		}
943	}
944
945	dcmd =  &cm->cm_frame->dcmd;
946	bzero(dcmd->mbox, MFI_MBOX_SIZE);
947	dcmd->header.cmd = MFI_CMD_DCMD;
948	dcmd->header.timeout = 0;
949	dcmd->header.flags = 0;
950	dcmd->header.data_len = bufsize;
951	dcmd->header.scsi_status = 0;
952	dcmd->opcode = opcode;
953	cm->cm_sg = &dcmd->sgl;
954	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
955	cm->cm_flags = 0;
956	cm->cm_data = buf;
957	cm->cm_private = buf;
958	cm->cm_len = bufsize;
959
960	*cmp = cm;
961	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
962		*bufp = buf;
963	return (0);
964}
965
966static int
967mfi_comms_init(struct mfi_softc *sc)
968{
969	struct mfi_command *cm;
970	struct mfi_init_frame *init;
971	struct mfi_init_qinfo *qinfo;
972	int error;
973	uint32_t context = 0;
974
975	mtx_lock(&sc->mfi_io_lock);
976	if ((cm = mfi_dequeue_free(sc)) == NULL) {
977		mtx_unlock(&sc->mfi_io_lock);
978		return (EBUSY);
979	}
980
981	/* Zero out the MFI frame */
982	context = cm->cm_frame->header.context;
983	bzero(cm->cm_frame, sizeof(union mfi_frame));
984	cm->cm_frame->header.context = context;
985
986	/*
987	 * Abuse the SG list area of the frame to hold the init_qinfo
988	 * object;
989	 */
990	init = &cm->cm_frame->init;
991	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
992
993	bzero(qinfo, sizeof(struct mfi_init_qinfo));
994	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
995	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
996	    offsetof(struct mfi_hwcomms, hw_reply_q);
997	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
998	    offsetof(struct mfi_hwcomms, hw_pi);
999	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
1000	    offsetof(struct mfi_hwcomms, hw_ci);
1001
1002	init->header.cmd = MFI_CMD_INIT;
1003	init->header.data_len = sizeof(struct mfi_init_qinfo);
1004	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1005	cm->cm_data = NULL;
1006	cm->cm_flags = MFI_CMD_POLLED;
1007
1008	if ((error = mfi_mapcmd(sc, cm)) != 0)
1009		device_printf(sc->mfi_dev, "failed to send init command\n");
1010	mfi_release_command(cm);
1011	mtx_unlock(&sc->mfi_io_lock);
1012
1013	return (error);
1014}
1015
1016static int
1017mfi_get_controller_info(struct mfi_softc *sc)
1018{
1019	struct mfi_command *cm = NULL;
1020	struct mfi_ctrl_info *ci = NULL;
1021	uint32_t max_sectors_1, max_sectors_2;
1022	int error;
1023
1024	mtx_lock(&sc->mfi_io_lock);
1025	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1026	    (void **)&ci, sizeof(*ci));
1027	if (error)
1028		goto out;
1029	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1030
1031	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1032		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1033		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1034		    MFI_SECTOR_LEN;
1035		error = 0;
1036		goto out;
1037	}
1038
1039	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1040	    BUS_DMASYNC_POSTREAD);
1041	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1042
1043	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1044	max_sectors_2 = ci->max_request_size;
1045	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1046	sc->disableOnlineCtrlReset =
1047	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1048
1049out:
1050	if (ci)
1051		free(ci, M_MFIBUF);
1052	if (cm)
1053		mfi_release_command(cm);
1054	mtx_unlock(&sc->mfi_io_lock);
1055	return (error);
1056}
1057
1058static int
1059mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1060{
1061	struct mfi_command *cm = NULL;
1062	int error;
1063
1064	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1065	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1066	    (void **)log_state, sizeof(**log_state));
1067	if (error)
1068		goto out;
1069	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1070
1071	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1072		device_printf(sc->mfi_dev, "Failed to get log state\n");
1073		goto out;
1074	}
1075
1076	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1077	    BUS_DMASYNC_POSTREAD);
1078	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1079
1080out:
1081	if (cm)
1082		mfi_release_command(cm);
1083
1084	return (error);
1085}
1086
1087int
1088mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1089{
1090	struct mfi_evt_log_state *log_state = NULL;
1091	union mfi_evt class_locale;
1092	int error = 0;
1093	uint32_t seq;
1094
1095	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1096
1097	class_locale.members.reserved = 0;
1098	class_locale.members.locale = mfi_event_locale;
1099	class_locale.members.evt_class  = mfi_event_class;
1100
1101	if (seq_start == 0) {
1102		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1103			goto out;
1104		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1105
1106		/*
1107		 * Walk through any events that fired since the last
1108		 * shutdown.
1109		 */
1110		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1111		    log_state->newest_seq_num)) != 0)
1112			goto out;
1113		seq = log_state->newest_seq_num;
1114	} else
1115		seq = seq_start;
1116	error = mfi_aen_register(sc, seq, class_locale.word);
1117out:
1118	free(log_state, M_MFIBUF);
1119
1120	return (error);
1121}
1122
1123int
1124mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1125{
1126
1127	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1128	cm->cm_complete = NULL;
1129
1130	/*
1131	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1132	 * and return 0 to it as status
1133	 */
1134	if (cm->cm_frame->dcmd.opcode == 0) {
1135		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1136		cm->cm_error = 0;
1137		return (cm->cm_error);
1138	}
1139	mfi_enqueue_ready(cm);
1140	mfi_startio(sc);
1141	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1142		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1143	return (cm->cm_error);
1144}
1145
1146void
1147mfi_free(struct mfi_softc *sc)
1148{
1149	struct mfi_command *cm;
1150	int i;
1151
1152	callout_drain(&sc->mfi_watchdog_callout);
1153
1154	if (sc->mfi_cdev != NULL)
1155		destroy_dev(sc->mfi_cdev);
1156
1157	if (sc->mfi_commands != NULL) {
1158		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1159			cm = &sc->mfi_commands[i];
1160			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1161		}
1162		free(sc->mfi_commands, M_MFIBUF);
1163		sc->mfi_commands = NULL;
1164	}
1165
1166	if (sc->mfi_intr)
1167		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1168	if (sc->mfi_irq != NULL)
1169		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1170		    sc->mfi_irq);
1171
1172	if (sc->mfi_sense_busaddr != 0)
1173		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1174	if (sc->mfi_sense != NULL)
1175		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1176		    sc->mfi_sense_dmamap);
1177	if (sc->mfi_sense_dmat != NULL)
1178		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1179
1180	if (sc->mfi_frames_busaddr != 0)
1181		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1182	if (sc->mfi_frames != NULL)
1183		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1184		    sc->mfi_frames_dmamap);
1185	if (sc->mfi_frames_dmat != NULL)
1186		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1187
1188	if (sc->mfi_comms_busaddr != 0)
1189		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1190	if (sc->mfi_comms != NULL)
1191		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1192		    sc->mfi_comms_dmamap);
1193	if (sc->mfi_comms_dmat != NULL)
1194		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1195
1196	/* ThunderBolt contiguous memory free here */
1197	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1198		if (sc->mfi_tb_busaddr != 0)
1199			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1200		if (sc->request_message_pool != NULL)
1201			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1202			    sc->mfi_tb_dmamap);
1203		if (sc->mfi_tb_dmat != NULL)
1204			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1205
1206		/* Version buffer memory free */
1207		/* Start LSIP200113393 */
1208		if (sc->verbuf_h_busaddr != 0)
1209			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1210		if (sc->verbuf != NULL)
1211			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1212			    sc->verbuf_h_dmamap);
1213		if (sc->verbuf_h_dmat != NULL)
1214			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1215
1216		/* End LSIP200113393 */
1217		/* ThunderBolt INIT packet memory Free */
1218		if (sc->mfi_tb_init_busaddr != 0)
1219			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1220			    sc->mfi_tb_init_dmamap);
1221		if (sc->mfi_tb_init != NULL)
1222			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1223			    sc->mfi_tb_init_dmamap);
1224		if (sc->mfi_tb_init_dmat != NULL)
1225			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1226
1227		/* ThunderBolt IOC Init Desc memory free here */
1228		if (sc->mfi_tb_ioc_init_busaddr != 0)
1229			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1230			    sc->mfi_tb_ioc_init_dmamap);
1231		if (sc->mfi_tb_ioc_init_desc != NULL)
1232			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1233			    sc->mfi_tb_ioc_init_desc,
1234			    sc->mfi_tb_ioc_init_dmamap);
1235		if (sc->mfi_tb_ioc_init_dmat != NULL)
1236			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1237		if (sc->mfi_cmd_pool_tbolt != NULL) {
1238			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1239				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1240					free(sc->mfi_cmd_pool_tbolt[i],
1241					    M_MFIBUF);
1242					sc->mfi_cmd_pool_tbolt[i] = NULL;
1243				}
1244			}
1245			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1246			sc->mfi_cmd_pool_tbolt = NULL;
1247		}
1248		if (sc->request_desc_pool != NULL) {
1249			free(sc->request_desc_pool, M_MFIBUF);
1250			sc->request_desc_pool = NULL;
1251		}
1252	}
1253	if (sc->mfi_buffer_dmat != NULL)
1254		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1255	if (sc->mfi_parent_dmat != NULL)
1256		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1257
1258	if (mtx_initialized(&sc->mfi_io_lock)) {
1259		mtx_destroy(&sc->mfi_io_lock);
1260		sx_destroy(&sc->mfi_config_lock);
1261	}
1262
1263	return;
1264}
1265
1266static void
1267mfi_startup(void *arg)
1268{
1269	struct mfi_softc *sc;
1270
1271	sc = (struct mfi_softc *)arg;
1272
1273	config_intrhook_disestablish(&sc->mfi_ich);
1274
1275	sc->mfi_enable_intr(sc);
1276	sx_xlock(&sc->mfi_config_lock);
1277	mtx_lock(&sc->mfi_io_lock);
1278	mfi_ldprobe(sc);
1279	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1280	    mfi_syspdprobe(sc);
1281	mtx_unlock(&sc->mfi_io_lock);
1282	sx_xunlock(&sc->mfi_config_lock);
1283}
1284
1285static void
1286mfi_intr(void *arg)
1287{
1288	struct mfi_softc *sc;
1289	struct mfi_command *cm;
1290	uint32_t pi, ci, context;
1291
1292	sc = (struct mfi_softc *)arg;
1293
1294	if (sc->mfi_check_clear_intr(sc))
1295		return;
1296
1297restart:
1298	pi = sc->mfi_comms->hw_pi;
1299	ci = sc->mfi_comms->hw_ci;
1300	mtx_lock(&sc->mfi_io_lock);
1301	while (ci != pi) {
1302		context = sc->mfi_comms->hw_reply_q[ci];
1303		if (context < sc->mfi_max_fw_cmds) {
1304			cm = &sc->mfi_commands[context];
1305			mfi_remove_busy(cm);
1306			cm->cm_error = 0;
1307			mfi_complete(sc, cm);
1308		}
1309		if (++ci == (sc->mfi_max_fw_cmds + 1))
1310			ci = 0;
1311	}
1312
1313	sc->mfi_comms->hw_ci = ci;
1314
1315	/* Give defered I/O a chance to run */
1316	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1317	mfi_startio(sc);
1318	mtx_unlock(&sc->mfi_io_lock);
1319
1320	/*
1321	 * Dummy read to flush the bus; this ensures that the indexes are up
1322	 * to date.  Restart processing if more commands have come it.
1323	 */
1324	(void)sc->mfi_read_fw_status(sc);
1325	if (pi != sc->mfi_comms->hw_pi)
1326		goto restart;
1327
1328	return;
1329}
1330
1331int
1332mfi_shutdown(struct mfi_softc *sc)
1333{
1334	struct mfi_dcmd_frame *dcmd;
1335	struct mfi_command *cm;
1336	int error;
1337
1338
1339	if (sc->mfi_aen_cm != NULL) {
1340		sc->cm_aen_abort = 1;
1341		mfi_abort(sc, &sc->mfi_aen_cm);
1342	}
1343
1344	if (sc->mfi_map_sync_cm != NULL) {
1345		sc->cm_map_abort = 1;
1346		mfi_abort(sc, &sc->mfi_map_sync_cm);
1347	}
1348
1349	mtx_lock(&sc->mfi_io_lock);
1350	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1351	if (error) {
1352		mtx_unlock(&sc->mfi_io_lock);
1353		return (error);
1354	}
1355
1356	dcmd = &cm->cm_frame->dcmd;
1357	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1358	cm->cm_flags = MFI_CMD_POLLED;
1359	cm->cm_data = NULL;
1360
1361	if ((error = mfi_mapcmd(sc, cm)) != 0)
1362		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1363
1364	mfi_release_command(cm);
1365	mtx_unlock(&sc->mfi_io_lock);
1366	return (error);
1367}
1368
1369static void
1370mfi_syspdprobe(struct mfi_softc *sc)
1371{
1372	struct mfi_frame_header *hdr;
1373	struct mfi_command *cm = NULL;
1374	struct mfi_pd_list *pdlist = NULL;
1375	struct mfi_system_pd *syspd, *tmp;
1376	struct mfi_system_pending *syspd_pend;
1377	int error, i, found;
1378
1379	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1380	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1381	/* Add SYSTEM PD's */
1382	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1383	    (void **)&pdlist, sizeof(*pdlist));
1384	if (error) {
1385		device_printf(sc->mfi_dev,
1386		    "Error while forming SYSTEM PD list\n");
1387		goto out;
1388	}
1389
1390	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1391	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1392	cm->cm_frame->dcmd.mbox[1] = 0;
1393	if (mfi_mapcmd(sc, cm) != 0) {
1394		device_printf(sc->mfi_dev,
1395		    "Failed to get syspd device listing\n");
1396		goto out;
1397	}
1398	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1399	    BUS_DMASYNC_POSTREAD);
1400	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1401	hdr = &cm->cm_frame->header;
1402	if (hdr->cmd_status != MFI_STAT_OK) {
1403		device_printf(sc->mfi_dev,
1404		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1405		goto out;
1406	}
1407	/* Get each PD and add it to the system */
1408	for (i = 0; i < pdlist->count; i++) {
1409		if (pdlist->addr[i].device_id ==
1410		    pdlist->addr[i].encl_device_id)
1411			continue;
1412		found = 0;
1413		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1414			if (syspd->pd_id == pdlist->addr[i].device_id)
1415				found = 1;
1416		}
1417		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1418			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1419				found = 1;
1420		}
1421		if (found == 0)
1422			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1423	}
1424	/* Delete SYSPD's whose state has been changed */
1425	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1426		found = 0;
1427		for (i = 0; i < pdlist->count; i++) {
1428			if (syspd->pd_id == pdlist->addr[i].device_id) {
1429				found = 1;
1430				break;
1431			}
1432		}
1433		if (found == 0) {
1434			printf("DELETE\n");
1435			mtx_unlock(&sc->mfi_io_lock);
1436			mtx_lock(&Giant);
1437			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1438			mtx_unlock(&Giant);
1439			mtx_lock(&sc->mfi_io_lock);
1440		}
1441	}
1442out:
1443	if (pdlist)
1444	    free(pdlist, M_MFIBUF);
1445	if (cm)
1446	    mfi_release_command(cm);
1447
1448	return;
1449}
1450
1451static void
1452mfi_ldprobe(struct mfi_softc *sc)
1453{
1454	struct mfi_frame_header *hdr;
1455	struct mfi_command *cm = NULL;
1456	struct mfi_ld_list *list = NULL;
1457	struct mfi_disk *ld;
1458	struct mfi_disk_pending *ld_pend;
1459	int error, i;
1460
1461	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1462	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1463
1464	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1465	    (void **)&list, sizeof(*list));
1466	if (error)
1467		goto out;
1468
1469	cm->cm_flags = MFI_CMD_DATAIN;
1470	if (mfi_wait_command(sc, cm) != 0) {
1471		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1472		goto out;
1473	}
1474
1475	hdr = &cm->cm_frame->header;
1476	if (hdr->cmd_status != MFI_STAT_OK) {
1477		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1478		    hdr->cmd_status);
1479		goto out;
1480	}
1481
1482	for (i = 0; i < list->ld_count; i++) {
1483		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1484			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1485				goto skip_add;
1486		}
1487		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1488			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1489				goto skip_add;
1490		}
1491		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1492	skip_add:;
1493	}
1494out:
1495	if (list)
1496		free(list, M_MFIBUF);
1497	if (cm)
1498		mfi_release_command(cm);
1499
1500	return;
1501}
1502
1503/*
1504 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1505 * the bits in 24-31 are all set, then it is the number of seconds since
1506 * boot.
1507 */
1508static const char *
1509format_timestamp(uint32_t timestamp)
1510{
1511	static char buffer[32];
1512
1513	if ((timestamp & 0xff000000) == 0xff000000)
1514		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1515		    0x00ffffff);
1516	else
1517		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1518	return (buffer);
1519}
1520
1521static const char *
1522format_class(int8_t class)
1523{
1524	static char buffer[6];
1525
1526	switch (class) {
1527	case MFI_EVT_CLASS_DEBUG:
1528		return ("debug");
1529	case MFI_EVT_CLASS_PROGRESS:
1530		return ("progress");
1531	case MFI_EVT_CLASS_INFO:
1532		return ("info");
1533	case MFI_EVT_CLASS_WARNING:
1534		return ("WARN");
1535	case MFI_EVT_CLASS_CRITICAL:
1536		return ("CRIT");
1537	case MFI_EVT_CLASS_FATAL:
1538		return ("FATAL");
1539	case MFI_EVT_CLASS_DEAD:
1540		return ("DEAD");
1541	default:
1542		snprintf(buffer, sizeof(buffer), "%d", class);
1543		return (buffer);
1544	}
1545}
1546
1547static void
1548mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1549{
1550	struct mfi_system_pd *syspd = NULL;
1551
1552	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1553	    format_timestamp(detail->time), detail->evt_class.members.locale,
1554	    format_class(detail->evt_class.members.evt_class),
1555	    detail->description);
1556
1557        /* Don't act on old AEN's or while shutting down */
1558        if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1559                return;
1560
1561	switch (detail->arg_type) {
1562	case MR_EVT_ARGS_NONE:
1563		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1564		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
1565			if (mfi_detect_jbod_change) {
1566				/*
1567				 * Probe for new SYSPD's and Delete
1568				 * invalid SYSPD's
1569				 */
1570				sx_xlock(&sc->mfi_config_lock);
1571				mtx_lock(&sc->mfi_io_lock);
1572				mfi_syspdprobe(sc);
1573				mtx_unlock(&sc->mfi_io_lock);
1574				sx_xunlock(&sc->mfi_config_lock);
1575			}
1576		}
1577		break;
1578	case MR_EVT_ARGS_LD_STATE:
1579		/* During load time driver reads all the events starting
1580		 * from the one that has been logged after shutdown. Avoid
1581		 * these old events.
1582		 */
1583		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
1584			/* Remove the LD */
1585			struct mfi_disk *ld;
1586			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1587				if (ld->ld_id ==
1588				    detail->args.ld_state.ld.target_id)
1589					break;
1590			}
1591			/*
1592			Fix: for kernel panics when SSCD is removed
1593			KASSERT(ld != NULL, ("volume dissappeared"));
1594			*/
1595			if (ld != NULL) {
1596				mtx_lock(&Giant);
1597				device_delete_child(sc->mfi_dev, ld->ld_dev);
1598				mtx_unlock(&Giant);
1599			}
1600		}
1601		break;
1602	case MR_EVT_ARGS_PD:
1603		if (detail->code == MR_EVT_PD_REMOVED) {
1604			if (mfi_detect_jbod_change) {
1605				/*
1606				 * If the removed device is a SYSPD then
1607				 * delete it
1608				 */
1609				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1610				    pd_link) {
1611					if (syspd->pd_id ==
1612					    detail->args.pd.device_id) {
1613						mtx_lock(&Giant);
1614						device_delete_child(
1615						    sc->mfi_dev,
1616						    syspd->pd_dev);
1617						mtx_unlock(&Giant);
1618						break;
1619					}
1620				}
1621			}
1622		}
1623		if (detail->code == MR_EVT_PD_INSERTED) {
1624			if (mfi_detect_jbod_change) {
1625				/* Probe for new SYSPD's */
1626				sx_xlock(&sc->mfi_config_lock);
1627				mtx_lock(&sc->mfi_io_lock);
1628				mfi_syspdprobe(sc);
1629				mtx_unlock(&sc->mfi_io_lock);
1630				sx_xunlock(&sc->mfi_config_lock);
1631			}
1632		}
1633		if (sc->mfi_cam_rescan_cb != NULL &&
1634		    (detail->code == MR_EVT_PD_INSERTED ||
1635		    detail->code == MR_EVT_PD_REMOVED)) {
1636			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1637		}
1638		break;
1639	}
1640}
1641
1642static void
1643mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1644{
1645	struct mfi_evt_queue_elm *elm;
1646
1647	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1648	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1649	if (elm == NULL)
1650		return;
1651	memcpy(&elm->detail, detail, sizeof(*detail));
1652	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1653	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1654}
1655
1656static void
1657mfi_handle_evt(void *context, int pending)
1658{
1659	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1660	struct mfi_softc *sc;
1661	struct mfi_evt_queue_elm *elm;
1662
1663	sc = context;
1664	TAILQ_INIT(&queue);
1665	mtx_lock(&sc->mfi_io_lock);
1666	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1667	mtx_unlock(&sc->mfi_io_lock);
1668	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1669		TAILQ_REMOVE(&queue, elm, link);
1670		mfi_decode_evt(sc, &elm->detail);
1671		free(elm, M_MFIBUF);
1672	}
1673}
1674
1675static int
1676mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1677{
1678	struct mfi_command *cm;
1679	struct mfi_dcmd_frame *dcmd;
1680	union mfi_evt current_aen, prior_aen;
1681	struct mfi_evt_detail *ed = NULL;
1682	int error = 0;
1683
1684	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1685
1686	current_aen.word = locale;
1687	if (sc->mfi_aen_cm != NULL) {
1688		prior_aen.word =
1689		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1690		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1691		    !((prior_aen.members.locale & current_aen.members.locale)
1692		    ^current_aen.members.locale)) {
1693			return (0);
1694		} else {
1695			prior_aen.members.locale |= current_aen.members.locale;
1696			if (prior_aen.members.evt_class
1697			    < current_aen.members.evt_class)
1698				current_aen.members.evt_class =
1699				    prior_aen.members.evt_class;
1700			mfi_abort(sc, &sc->mfi_aen_cm);
1701		}
1702	}
1703
1704	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1705	    (void **)&ed, sizeof(*ed));
1706	if (error)
1707		goto out;
1708
1709	dcmd = &cm->cm_frame->dcmd;
1710	((uint32_t *)&dcmd->mbox)[0] = seq;
1711	((uint32_t *)&dcmd->mbox)[1] = locale;
1712	cm->cm_flags = MFI_CMD_DATAIN;
1713	cm->cm_complete = mfi_aen_complete;
1714
1715	sc->last_seq_num = seq;
1716	sc->mfi_aen_cm = cm;
1717
1718	mfi_enqueue_ready(cm);
1719	mfi_startio(sc);
1720
1721out:
1722	return (error);
1723}
1724
1725static void
1726mfi_aen_complete(struct mfi_command *cm)
1727{
1728	struct mfi_frame_header *hdr;
1729	struct mfi_softc *sc;
1730	struct mfi_evt_detail *detail;
1731	struct mfi_aen *mfi_aen_entry, *tmp;
1732	int seq = 0, aborted = 0;
1733
1734	sc = cm->cm_sc;
1735	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1736
1737	if (sc->mfi_aen_cm == NULL)
1738		return;
1739
1740	hdr = &cm->cm_frame->header;
1741
1742	if (sc->cm_aen_abort ||
1743	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1744		sc->cm_aen_abort = 0;
1745		aborted = 1;
1746	} else {
1747		sc->mfi_aen_triggered = 1;
1748		if (sc->mfi_poll_waiting) {
1749			sc->mfi_poll_waiting = 0;
1750			selwakeup(&sc->mfi_select);
1751		}
1752		detail = cm->cm_data;
1753		mfi_queue_evt(sc, detail);
1754		seq = detail->seq + 1;
1755		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1756		    tmp) {
1757			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1758			    aen_link);
1759			PROC_LOCK(mfi_aen_entry->p);
1760			kern_psignal(mfi_aen_entry->p, SIGIO);
1761			PROC_UNLOCK(mfi_aen_entry->p);
1762			free(mfi_aen_entry, M_MFIBUF);
1763		}
1764	}
1765
1766	free(cm->cm_data, M_MFIBUF);
1767	wakeup(&sc->mfi_aen_cm);
1768	sc->mfi_aen_cm = NULL;
1769	mfi_release_command(cm);
1770
1771	/* set it up again so the driver can catch more events */
1772	if (!aborted)
1773		mfi_aen_setup(sc, seq);
1774}
1775
1776#define MAX_EVENTS 15
1777
1778static int
1779mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1780{
1781	struct mfi_command *cm;
1782	struct mfi_dcmd_frame *dcmd;
1783	struct mfi_evt_list *el;
1784	union mfi_evt class_locale;
1785	int error, i, seq, size;
1786
1787	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1788
1789	class_locale.members.reserved = 0;
1790	class_locale.members.locale = mfi_event_locale;
1791	class_locale.members.evt_class  = mfi_event_class;
1792
1793	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1794		* (MAX_EVENTS - 1);
1795	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1796	if (el == NULL)
1797		return (ENOMEM);
1798
1799	for (seq = start_seq;;) {
1800		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1801			free(el, M_MFIBUF);
1802			return (EBUSY);
1803		}
1804
1805		dcmd = &cm->cm_frame->dcmd;
1806		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1807		dcmd->header.cmd = MFI_CMD_DCMD;
1808		dcmd->header.timeout = 0;
1809		dcmd->header.data_len = size;
1810		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1811		((uint32_t *)&dcmd->mbox)[0] = seq;
1812		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1813		cm->cm_sg = &dcmd->sgl;
1814		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1815		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1816		cm->cm_data = el;
1817		cm->cm_len = size;
1818
1819		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1820			device_printf(sc->mfi_dev,
1821			    "Failed to get controller entries\n");
1822			mfi_release_command(cm);
1823			break;
1824		}
1825
1826		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1827		    BUS_DMASYNC_POSTREAD);
1828		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1829
1830		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1831			mfi_release_command(cm);
1832			break;
1833		}
1834		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1835			device_printf(sc->mfi_dev,
1836			    "Error %d fetching controller entries\n",
1837			    dcmd->header.cmd_status);
1838			mfi_release_command(cm);
1839			error = EIO;
1840			break;
1841		}
1842		mfi_release_command(cm);
1843
1844		for (i = 0; i < el->count; i++) {
1845			/*
1846			 * If this event is newer than 'stop_seq' then
1847			 * break out of the loop.  Note that the log
1848			 * is a circular buffer so we have to handle
1849			 * the case that our stop point is earlier in
1850			 * the buffer than our start point.
1851			 */
1852			if (el->event[i].seq >= stop_seq) {
1853				if (start_seq <= stop_seq)
1854					break;
1855				else if (el->event[i].seq < start_seq)
1856					break;
1857			}
1858			mfi_queue_evt(sc, &el->event[i]);
1859		}
1860		seq = el->event[el->count - 1].seq + 1;
1861	}
1862
1863	free(el, M_MFIBUF);
1864	return (error);
1865}
1866
1867static int
1868mfi_add_ld(struct mfi_softc *sc, int id)
1869{
1870	struct mfi_command *cm;
1871	struct mfi_dcmd_frame *dcmd = NULL;
1872	struct mfi_ld_info *ld_info = NULL;
1873	struct mfi_disk_pending *ld_pend;
1874	int error;
1875
1876	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1877
1878	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1879	if (ld_pend != NULL) {
1880		ld_pend->ld_id = id;
1881		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1882	}
1883
1884	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1885	    (void **)&ld_info, sizeof(*ld_info));
1886	if (error) {
1887		device_printf(sc->mfi_dev,
1888		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1889		if (ld_info)
1890			free(ld_info, M_MFIBUF);
1891		return (error);
1892	}
1893	cm->cm_flags = MFI_CMD_DATAIN;
1894	dcmd = &cm->cm_frame->dcmd;
1895	dcmd->mbox[0] = id;
1896	if (mfi_wait_command(sc, cm) != 0) {
1897		device_printf(sc->mfi_dev,
1898		    "Failed to get logical drive: %d\n", id);
1899		free(ld_info, M_MFIBUF);
1900		return (0);
1901	}
1902	if (ld_info->ld_config.params.isSSCD != 1)
1903		mfi_add_ld_complete(cm);
1904	else {
1905		mfi_release_command(cm);
1906		if (ld_info)		/* SSCD drives ld_info free here */
1907			free(ld_info, M_MFIBUF);
1908	}
1909	return (0);
1910}
1911
1912static void
1913mfi_add_ld_complete(struct mfi_command *cm)
1914{
1915	struct mfi_frame_header *hdr;
1916	struct mfi_ld_info *ld_info;
1917	struct mfi_softc *sc;
1918	device_t child;
1919
1920	sc = cm->cm_sc;
1921	hdr = &cm->cm_frame->header;
1922	ld_info = cm->cm_private;
1923
1924	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1925		free(ld_info, M_MFIBUF);
1926		wakeup(&sc->mfi_map_sync_cm);
1927		mfi_release_command(cm);
1928		return;
1929	}
1930	wakeup(&sc->mfi_map_sync_cm);
1931	mfi_release_command(cm);
1932
1933	mtx_unlock(&sc->mfi_io_lock);
1934	mtx_lock(&Giant);
1935	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1936		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1937		free(ld_info, M_MFIBUF);
1938		mtx_unlock(&Giant);
1939		mtx_lock(&sc->mfi_io_lock);
1940		return;
1941	}
1942
1943	device_set_ivars(child, ld_info);
1944	device_set_desc(child, "MFI Logical Disk");
1945	bus_generic_attach(sc->mfi_dev);
1946	mtx_unlock(&Giant);
1947	mtx_lock(&sc->mfi_io_lock);
1948}
1949
1950static int mfi_add_sys_pd(struct mfi_softc *sc, int id)
1951{
1952	struct mfi_command *cm;
1953	struct mfi_dcmd_frame *dcmd = NULL;
1954	struct mfi_pd_info *pd_info = NULL;
1955	struct mfi_system_pending *syspd_pend;
1956	int error;
1957
1958	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1959
1960	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1961	if (syspd_pend != NULL) {
1962		syspd_pend->pd_id = id;
1963		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1964	}
1965
1966	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1967		(void **)&pd_info, sizeof(*pd_info));
1968	if (error) {
1969		device_printf(sc->mfi_dev,
1970		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1971		    error);
1972		if (pd_info)
1973			free(pd_info, M_MFIBUF);
1974		return (error);
1975	}
1976	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1977	dcmd = &cm->cm_frame->dcmd;
1978	dcmd->mbox[0]=id;
1979	dcmd->header.scsi_status = 0;
1980	dcmd->header.pad0 = 0;
1981	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1982		device_printf(sc->mfi_dev,
1983		    "Failed to get physical drive info %d\n", id);
1984		free(pd_info, M_MFIBUF);
1985		mfi_release_command(cm);
1986		return (error);
1987	}
1988	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1989	    BUS_DMASYNC_POSTREAD);
1990	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1991	mfi_add_sys_pd_complete(cm);
1992	return (0);
1993}
1994
1995static void
1996mfi_add_sys_pd_complete(struct mfi_command *cm)
1997{
1998	struct mfi_frame_header *hdr;
1999	struct mfi_pd_info *pd_info;
2000	struct mfi_softc *sc;
2001	device_t child;
2002
2003	sc = cm->cm_sc;
2004	hdr = &cm->cm_frame->header;
2005	pd_info = cm->cm_private;
2006
2007	if (hdr->cmd_status != MFI_STAT_OK) {
2008		free(pd_info, M_MFIBUF);
2009		mfi_release_command(cm);
2010		return;
2011	}
2012	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2013		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2014		    pd_info->ref.v.device_id);
2015		free(pd_info, M_MFIBUF);
2016		mfi_release_command(cm);
2017		return;
2018	}
2019	mfi_release_command(cm);
2020
2021	mtx_unlock(&sc->mfi_io_lock);
2022	mtx_lock(&Giant);
2023	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2024		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2025		free(pd_info, M_MFIBUF);
2026		mtx_unlock(&Giant);
2027		mtx_lock(&sc->mfi_io_lock);
2028		return;
2029	}
2030
2031	device_set_ivars(child, pd_info);
2032	device_set_desc(child, "MFI System PD");
2033	bus_generic_attach(sc->mfi_dev);
2034	mtx_unlock(&Giant);
2035	mtx_lock(&sc->mfi_io_lock);
2036}
2037
2038static struct mfi_command *
2039mfi_bio_command(struct mfi_softc *sc)
2040{
2041	struct bio *bio;
2042	struct mfi_command *cm = NULL;
2043
2044	/*reserving two commands to avoid starvation for IOCTL*/
2045	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2046		return (NULL);
2047	}
2048	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2049		return (NULL);
2050	}
2051	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2052		cm = mfi_build_ldio(sc, bio);
2053	} else if ((uintptr_t) bio->bio_driver2 == MFI_SYS_PD_IO) {
2054		cm = mfi_build_syspdio(sc, bio);
2055	}
2056	if (!cm)
2057	    mfi_enqueue_bio(sc, bio);
2058	return cm;
2059}
2060
2061/*
2062 * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2063 */
2064
2065int
2066mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2067{
2068	int cdb_len;
2069
2070	if (((lba & 0x1fffff) == lba)
2071         && ((block_count & 0xff) == block_count)
2072         && (byte2 == 0)) {
2073		/* We can fit in a 6 byte cdb */
2074		struct scsi_rw_6 *scsi_cmd;
2075
2076		scsi_cmd = (struct scsi_rw_6 *)cdb;
2077		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2078		scsi_ulto3b(lba, scsi_cmd->addr);
2079		scsi_cmd->length = block_count & 0xff;
2080		scsi_cmd->control = 0;
2081		cdb_len = sizeof(*scsi_cmd);
2082	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2083		/* Need a 10 byte CDB */
2084		struct scsi_rw_10 *scsi_cmd;
2085
2086		scsi_cmd = (struct scsi_rw_10 *)cdb;
2087		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2088		scsi_cmd->byte2 = byte2;
2089		scsi_ulto4b(lba, scsi_cmd->addr);
2090		scsi_cmd->reserved = 0;
2091		scsi_ulto2b(block_count, scsi_cmd->length);
2092		scsi_cmd->control = 0;
2093		cdb_len = sizeof(*scsi_cmd);
2094	} else if (((block_count & 0xffffffff) == block_count) &&
2095	    ((lba & 0xffffffff) == lba)) {
2096		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2097		struct scsi_rw_12 *scsi_cmd;
2098
2099		scsi_cmd = (struct scsi_rw_12 *)cdb;
2100		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2101		scsi_cmd->byte2 = byte2;
2102		scsi_ulto4b(lba, scsi_cmd->addr);
2103		scsi_cmd->reserved = 0;
2104		scsi_ulto4b(block_count, scsi_cmd->length);
2105		scsi_cmd->control = 0;
2106		cdb_len = sizeof(*scsi_cmd);
2107	} else {
2108		/*
2109		 * 16 byte CDB.  We'll only get here if the LBA is larger
2110		 * than 2^32
2111		 */
2112		struct scsi_rw_16 *scsi_cmd;
2113
2114		scsi_cmd = (struct scsi_rw_16 *)cdb;
2115		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2116		scsi_cmd->byte2 = byte2;
2117		scsi_u64to8b(lba, scsi_cmd->addr);
2118		scsi_cmd->reserved = 0;
2119		scsi_ulto4b(block_count, scsi_cmd->length);
2120		scsi_cmd->control = 0;
2121		cdb_len = sizeof(*scsi_cmd);
2122	}
2123
2124	return cdb_len;
2125}
2126
2127extern char *unmapped_buf;
2128
2129static struct mfi_command *
2130mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2131{
2132	struct mfi_command *cm;
2133	struct mfi_pass_frame *pass;
2134	uint32_t context = 0;
2135	int flags = 0, blkcount = 0, readop;
2136	uint8_t cdb_len;
2137
2138	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2139
2140	if ((cm = mfi_dequeue_free(sc)) == NULL)
2141	    return (NULL);
2142
2143	/* Zero out the MFI frame */
2144	context = cm->cm_frame->header.context;
2145	bzero(cm->cm_frame, sizeof(union mfi_frame));
2146	cm->cm_frame->header.context = context;
2147	pass = &cm->cm_frame->pass;
2148	bzero(pass->cdb, 16);
2149	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2150	switch (bio->bio_cmd & 0x03) {
2151	case BIO_READ:
2152		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2153		readop = 1;
2154		break;
2155	case BIO_WRITE:
2156		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2157		readop = 0;
2158		break;
2159	default:
2160		/* TODO: what about BIO_DELETE??? */
2161		panic("Unsupported bio command %x\n", bio->bio_cmd);
2162	}
2163
2164	/* Cheat with the sector length to avoid a non-constant division */
2165	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2166	/* Fill the LBA and Transfer length in CDB */
2167	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2168	    pass->cdb);
2169	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2170	pass->header.lun_id = 0;
2171	pass->header.timeout = 0;
2172	pass->header.flags = 0;
2173	pass->header.scsi_status = 0;
2174	pass->header.sense_len = MFI_SENSE_LEN;
2175	pass->header.data_len = bio->bio_bcount;
2176	pass->header.cdb_len = cdb_len;
2177	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2178	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2179	cm->cm_complete = mfi_bio_complete;
2180	cm->cm_private = bio;
2181	cm->cm_data = unmapped_buf;
2182	cm->cm_len = bio->bio_bcount;
2183	cm->cm_sg = &pass->sgl;
2184	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2185	cm->cm_flags = flags;
2186
2187	return (cm);
2188}
2189
2190static struct mfi_command *
2191mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2192{
2193	struct mfi_io_frame *io;
2194	struct mfi_command *cm;
2195	int flags;
2196	uint32_t blkcount;
2197	uint32_t context = 0;
2198
2199	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2200
2201	if ((cm = mfi_dequeue_free(sc)) == NULL)
2202	    return (NULL);
2203
2204	/* Zero out the MFI frame */
2205	context = cm->cm_frame->header.context;
2206	bzero(cm->cm_frame, sizeof(union mfi_frame));
2207	cm->cm_frame->header.context = context;
2208	io = &cm->cm_frame->io;
2209	switch (bio->bio_cmd & 0x03) {
2210	case BIO_READ:
2211		io->header.cmd = MFI_CMD_LD_READ;
2212		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2213		break;
2214	case BIO_WRITE:
2215		io->header.cmd = MFI_CMD_LD_WRITE;
2216		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2217		break;
2218	default:
2219		/* TODO: what about BIO_DELETE??? */
2220		panic("Unsupported bio command %x\n", bio->bio_cmd);
2221	}
2222
2223	/* Cheat with the sector length to avoid a non-constant division */
2224	blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2225	io->header.target_id = (uintptr_t)bio->bio_driver1;
2226	io->header.timeout = 0;
2227	io->header.flags = 0;
2228	io->header.scsi_status = 0;
2229	io->header.sense_len = MFI_SENSE_LEN;
2230	io->header.data_len = blkcount;
2231	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2232	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2233	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2234	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2235	cm->cm_complete = mfi_bio_complete;
2236	cm->cm_private = bio;
2237	cm->cm_data = unmapped_buf;
2238	cm->cm_len = bio->bio_bcount;
2239	cm->cm_sg = &io->sgl;
2240	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2241	cm->cm_flags = flags;
2242
2243	return (cm);
2244}
2245
2246static void
2247mfi_bio_complete(struct mfi_command *cm)
2248{
2249	struct bio *bio;
2250	struct mfi_frame_header *hdr;
2251	struct mfi_softc *sc;
2252
2253	bio = cm->cm_private;
2254	hdr = &cm->cm_frame->header;
2255	sc = cm->cm_sc;
2256
2257	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2258		bio->bio_flags |= BIO_ERROR;
2259		bio->bio_error = EIO;
2260		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2261		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2262		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2263	} else if (cm->cm_error != 0) {
2264		bio->bio_flags |= BIO_ERROR;
2265		bio->bio_error = cm->cm_error;
2266		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2267		    cm, cm->cm_error);
2268	}
2269
2270	mfi_release_command(cm);
2271	mfi_disk_complete(bio);
2272}
2273
2274void
2275mfi_startio(struct mfi_softc *sc)
2276{
2277	struct mfi_command *cm;
2278	struct ccb_hdr *ccbh;
2279
2280	for (;;) {
2281		/* Don't bother if we're short on resources */
2282		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2283			break;
2284
2285		/* Try a command that has already been prepared */
2286		cm = mfi_dequeue_ready(sc);
2287
2288		if (cm == NULL) {
2289			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2290				cm = sc->mfi_cam_start(ccbh);
2291		}
2292
2293		/* Nope, so look for work on the bioq */
2294		if (cm == NULL)
2295			cm = mfi_bio_command(sc);
2296
2297		/* No work available, so exit */
2298		if (cm == NULL)
2299			break;
2300
2301		/* Send the command to the controller */
2302		if (mfi_mapcmd(sc, cm) != 0) {
2303			device_printf(sc->mfi_dev, "Failed to startio\n");
2304			mfi_requeue_ready(cm);
2305			break;
2306		}
2307	}
2308}
2309
2310int
2311mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2312{
2313	int error, polled;
2314
2315	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2316
2317	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
2318		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2319		if (cm->cm_flags & MFI_CMD_CCB)
2320			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2321			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2322			    polled);
2323		else if (cm->cm_flags & MFI_CMD_BIO)
2324			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2325			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2326			    polled);
2327		else
2328			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2329			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2330			    mfi_data_cb, cm, polled);
2331		if (error == EINPROGRESS) {
2332			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2333			return (0);
2334		}
2335	} else {
2336		error = mfi_send_frame(sc, cm);
2337	}
2338
2339	return (error);
2340}
2341
2342static void
2343mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2344{
2345	struct mfi_frame_header *hdr;
2346	struct mfi_command *cm;
2347	union mfi_sgl *sgl;
2348	struct mfi_softc *sc;
2349	int i, j, first, dir;
2350	int sge_size, locked;
2351
2352	cm = (struct mfi_command *)arg;
2353	sc = cm->cm_sc;
2354	hdr = &cm->cm_frame->header;
2355	sgl = cm->cm_sg;
2356
2357	/*
2358	 * We need to check if we have the lock as this is async
2359	 * callback so even though our caller mfi_mapcmd asserts
2360	 * it has the lock, there is no garantee that hasn't been
2361	 * dropped if bus_dmamap_load returned prior to our
2362	 * completion.
2363	 */
2364	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2365		mtx_lock(&sc->mfi_io_lock);
2366
2367	if (error) {
2368		printf("error %d in callback\n", error);
2369		cm->cm_error = error;
2370		mfi_complete(sc, cm);
2371		goto out;
2372	}
2373	/* Use IEEE sgl only for IO's on a SKINNY controller
2374	 * For other commands on a SKINNY controller use either
2375	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2376	 * Also calculate the total frame size based on the type
2377	 * of SGL used.
2378	 */
2379	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2380	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2381	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2382	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2383		for (i = 0; i < nsegs; i++) {
2384			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2385			sgl->sg_skinny[i].len = segs[i].ds_len;
2386			sgl->sg_skinny[i].flag = 0;
2387		}
2388		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2389		sge_size = sizeof(struct mfi_sg_skinny);
2390		hdr->sg_count = nsegs;
2391	} else {
2392		j = 0;
2393		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2394			first = cm->cm_stp_len;
2395			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2396				sgl->sg32[j].addr = segs[0].ds_addr;
2397				sgl->sg32[j++].len = first;
2398			} else {
2399				sgl->sg64[j].addr = segs[0].ds_addr;
2400				sgl->sg64[j++].len = first;
2401			}
2402		} else
2403			first = 0;
2404		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2405			for (i = 0; i < nsegs; i++) {
2406				sgl->sg32[j].addr = segs[i].ds_addr + first;
2407				sgl->sg32[j++].len = segs[i].ds_len - first;
2408				first = 0;
2409			}
2410		} else {
2411			for (i = 0; i < nsegs; i++) {
2412				sgl->sg64[j].addr = segs[i].ds_addr + first;
2413				sgl->sg64[j++].len = segs[i].ds_len - first;
2414				first = 0;
2415			}
2416			hdr->flags |= MFI_FRAME_SGL64;
2417		}
2418		hdr->sg_count = j;
2419		sge_size = sc->mfi_sge_size;
2420	}
2421
2422	dir = 0;
2423	if (cm->cm_flags & MFI_CMD_DATAIN) {
2424		dir |= BUS_DMASYNC_PREREAD;
2425		hdr->flags |= MFI_FRAME_DIR_READ;
2426	}
2427	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2428		dir |= BUS_DMASYNC_PREWRITE;
2429		hdr->flags |= MFI_FRAME_DIR_WRITE;
2430	}
2431	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2432	cm->cm_flags |= MFI_CMD_MAPPED;
2433
2434	/*
2435	 * Instead of calculating the total number of frames in the
2436	 * compound frame, it's already assumed that there will be at
2437	 * least 1 frame, so don't compensate for the modulo of the
2438	 * following division.
2439	 */
2440	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2441	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2442
2443	if ((error = mfi_send_frame(sc, cm)) != 0) {
2444		printf("error %d in callback from mfi_send_frame\n", error);
2445		cm->cm_error = error;
2446		mfi_complete(sc, cm);
2447		goto out;
2448	}
2449
2450out:
2451	/* leave the lock in the state we found it */
2452	if (locked == 0)
2453		mtx_unlock(&sc->mfi_io_lock);
2454
2455	return;
2456}
2457
2458static int
2459mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2460{
2461	int error;
2462
2463	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2464
2465	if (sc->MFA_enabled)
2466		error = mfi_tbolt_send_frame(sc, cm);
2467	else
2468		error = mfi_std_send_frame(sc, cm);
2469
2470	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2471		mfi_remove_busy(cm);
2472
2473	return (error);
2474}
2475
2476static int
2477mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2478{
2479	struct mfi_frame_header *hdr;
2480	int tm = mfi_polled_cmd_timeout * 1000;
2481
2482	hdr = &cm->cm_frame->header;
2483
2484	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2485		cm->cm_timestamp = time_uptime;
2486		mfi_enqueue_busy(cm);
2487	} else {
2488		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2489		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2490	}
2491
2492	/*
2493	 * The bus address of the command is aligned on a 64 byte boundary,
2494	 * leaving the least 6 bits as zero.  For whatever reason, the
2495	 * hardware wants the address shifted right by three, leaving just
2496	 * 3 zero bits.  These three bits are then used as a prefetching
2497	 * hint for the hardware to predict how many frames need to be
2498	 * fetched across the bus.  If a command has more than 8 frames
2499	 * then the 3 bits are set to 0x7 and the firmware uses other
2500	 * information in the command to determine the total amount to fetch.
2501	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2502	 * is enough for both 32bit and 64bit systems.
2503	 */
2504	if (cm->cm_extra_frames > 7)
2505		cm->cm_extra_frames = 7;
2506
2507	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2508
2509	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2510		return (0);
2511
2512	/* This is a polled command, so busy-wait for it to complete. */
2513	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2514		DELAY(1000);
2515		tm -= 1;
2516		if (tm <= 0)
2517			break;
2518	}
2519
2520	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2521		device_printf(sc->mfi_dev, "Frame %p timed out "
2522		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2523		return (ETIMEDOUT);
2524	}
2525
2526	return (0);
2527}
2528
2529
2530void
2531mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2532{
2533	int dir;
2534	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2535
2536	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2537		dir = 0;
2538		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2539		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2540			dir |= BUS_DMASYNC_POSTREAD;
2541		if (cm->cm_flags & MFI_CMD_DATAOUT)
2542			dir |= BUS_DMASYNC_POSTWRITE;
2543
2544		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2545		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2546		cm->cm_flags &= ~MFI_CMD_MAPPED;
2547	}
2548
2549	cm->cm_flags |= MFI_CMD_COMPLETED;
2550
2551	if (cm->cm_complete != NULL)
2552		cm->cm_complete(cm);
2553	else
2554		wakeup(cm);
2555}
2556
2557static int
2558mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2559{
2560	struct mfi_command *cm;
2561	struct mfi_abort_frame *abort;
2562	int i = 0, error;
2563	uint32_t context = 0;
2564
2565	mtx_lock(&sc->mfi_io_lock);
2566	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2567		mtx_unlock(&sc->mfi_io_lock);
2568		return (EBUSY);
2569	}
2570
2571	/* Zero out the MFI frame */
2572	context = cm->cm_frame->header.context;
2573	bzero(cm->cm_frame, sizeof(union mfi_frame));
2574	cm->cm_frame->header.context = context;
2575
2576	abort = &cm->cm_frame->abort;
2577	abort->header.cmd = MFI_CMD_ABORT;
2578	abort->header.flags = 0;
2579	abort->header.scsi_status = 0;
2580	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2581	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2582	abort->abort_mfi_addr_hi =
2583		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2584	cm->cm_data = NULL;
2585	cm->cm_flags = MFI_CMD_POLLED;
2586
2587	if ((error = mfi_mapcmd(sc, cm)) != 0)
2588		device_printf(sc->mfi_dev, "failed to abort command\n");
2589	mfi_release_command(cm);
2590
2591	mtx_unlock(&sc->mfi_io_lock);
2592	while (i < 5 && *cm_abort != NULL) {
2593		tsleep(cm_abort, 0, "mfiabort",
2594		    5 * hz);
2595		i++;
2596	}
2597	if (*cm_abort != NULL) {
2598		/* Force a complete if command didn't abort */
2599		mtx_lock(&sc->mfi_io_lock);
2600		(*cm_abort)->cm_complete(*cm_abort);
2601		mtx_unlock(&sc->mfi_io_lock);
2602	}
2603
2604	return (error);
2605}
2606
2607int
2608mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2609     int len)
2610{
2611	struct mfi_command *cm;
2612	struct mfi_io_frame *io;
2613	int error;
2614	uint32_t context = 0;
2615
2616	if ((cm = mfi_dequeue_free(sc)) == NULL)
2617		return (EBUSY);
2618
2619	/* Zero out the MFI frame */
2620	context = cm->cm_frame->header.context;
2621	bzero(cm->cm_frame, sizeof(union mfi_frame));
2622	cm->cm_frame->header.context = context;
2623
2624	io = &cm->cm_frame->io;
2625	io->header.cmd = MFI_CMD_LD_WRITE;
2626	io->header.target_id = id;
2627	io->header.timeout = 0;
2628	io->header.flags = 0;
2629	io->header.scsi_status = 0;
2630	io->header.sense_len = MFI_SENSE_LEN;
2631	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2632	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2633	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2634	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2635	io->lba_lo = lba & 0xffffffff;
2636	cm->cm_data = virt;
2637	cm->cm_len = len;
2638	cm->cm_sg = &io->sgl;
2639	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2640	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2641
2642	if ((error = mfi_mapcmd(sc, cm)) != 0)
2643		device_printf(sc->mfi_dev, "failed dump blocks\n");
2644	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2645	    BUS_DMASYNC_POSTWRITE);
2646	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2647	mfi_release_command(cm);
2648
2649	return (error);
2650}
2651
2652int
2653mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2654    int len)
2655{
2656	struct mfi_command *cm;
2657	struct mfi_pass_frame *pass;
2658	int error, readop, cdb_len;
2659	uint32_t blkcount;
2660
2661	if ((cm = mfi_dequeue_free(sc)) == NULL)
2662		return (EBUSY);
2663
2664	pass = &cm->cm_frame->pass;
2665	bzero(pass->cdb, 16);
2666	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2667
2668	readop = 0;
2669	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2670	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2671	pass->header.target_id = id;
2672	pass->header.timeout = 0;
2673	pass->header.flags = 0;
2674	pass->header.scsi_status = 0;
2675	pass->header.sense_len = MFI_SENSE_LEN;
2676	pass->header.data_len = len;
2677	pass->header.cdb_len = cdb_len;
2678	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2679	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2680	cm->cm_data = virt;
2681	cm->cm_len = len;
2682	cm->cm_sg = &pass->sgl;
2683	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2684	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2685
2686	if ((error = mfi_mapcmd(sc, cm)) != 0)
2687		device_printf(sc->mfi_dev, "failed dump blocks\n");
2688	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2689	    BUS_DMASYNC_POSTWRITE);
2690	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2691	mfi_release_command(cm);
2692
2693	return (error);
2694}
2695
2696static int
2697mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2698{
2699	struct mfi_softc *sc;
2700	int error;
2701
2702	sc = dev->si_drv1;
2703
2704	mtx_lock(&sc->mfi_io_lock);
2705	if (sc->mfi_detaching)
2706		error = ENXIO;
2707	else {
2708		sc->mfi_flags |= MFI_FLAGS_OPEN;
2709		error = 0;
2710	}
2711	mtx_unlock(&sc->mfi_io_lock);
2712
2713	return (error);
2714}
2715
2716static int
2717mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2718{
2719	struct mfi_softc *sc;
2720	struct mfi_aen *mfi_aen_entry, *tmp;
2721
2722	sc = dev->si_drv1;
2723
2724	mtx_lock(&sc->mfi_io_lock);
2725	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2726
2727	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2728		if (mfi_aen_entry->p == curproc) {
2729			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2730			    aen_link);
2731			free(mfi_aen_entry, M_MFIBUF);
2732		}
2733	}
2734	mtx_unlock(&sc->mfi_io_lock);
2735	return (0);
2736}
2737
2738static int
2739mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2740{
2741
2742	switch (opcode) {
2743	case MFI_DCMD_LD_DELETE:
2744	case MFI_DCMD_CFG_ADD:
2745	case MFI_DCMD_CFG_CLEAR:
2746	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2747		sx_xlock(&sc->mfi_config_lock);
2748		return (1);
2749	default:
2750		return (0);
2751	}
2752}
2753
2754static void
2755mfi_config_unlock(struct mfi_softc *sc, int locked)
2756{
2757
2758	if (locked)
2759		sx_xunlock(&sc->mfi_config_lock);
2760}
2761
2762/*
2763 * Perform pre-issue checks on commands from userland and possibly veto
2764 * them.
2765 */
2766static int
2767mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2768{
2769	struct mfi_disk *ld, *ld2;
2770	int error;
2771	struct mfi_system_pd *syspd = NULL;
2772	uint16_t syspd_id;
2773	uint16_t *mbox;
2774
2775	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2776	error = 0;
2777	switch (cm->cm_frame->dcmd.opcode) {
2778	case MFI_DCMD_LD_DELETE:
2779		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2780			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2781				break;
2782		}
2783		if (ld == NULL)
2784			error = ENOENT;
2785		else
2786			error = mfi_disk_disable(ld);
2787		break;
2788	case MFI_DCMD_CFG_CLEAR:
2789		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2790			error = mfi_disk_disable(ld);
2791			if (error)
2792				break;
2793		}
2794		if (error) {
2795			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2796				if (ld2 == ld)
2797					break;
2798				mfi_disk_enable(ld2);
2799			}
2800		}
2801		break;
2802	case MFI_DCMD_PD_STATE_SET:
2803		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2804		syspd_id = mbox[0];
2805		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2806			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2807				if (syspd->pd_id == syspd_id)
2808					break;
2809			}
2810		}
2811		else
2812			break;
2813		if (syspd)
2814			error = mfi_syspd_disable(syspd);
2815		break;
2816	default:
2817		break;
2818	}
2819	return (error);
2820}
2821
2822/* Perform post-issue checks on commands from userland. */
2823static void
2824mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2825{
2826	struct mfi_disk *ld, *ldn;
2827	struct mfi_system_pd *syspd = NULL;
2828	uint16_t syspd_id;
2829	uint16_t *mbox;
2830
2831	switch (cm->cm_frame->dcmd.opcode) {
2832	case MFI_DCMD_LD_DELETE:
2833		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2834			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2835				break;
2836		}
2837		KASSERT(ld != NULL, ("volume dissappeared"));
2838		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2839			mtx_unlock(&sc->mfi_io_lock);
2840			mtx_lock(&Giant);
2841			device_delete_child(sc->mfi_dev, ld->ld_dev);
2842			mtx_unlock(&Giant);
2843			mtx_lock(&sc->mfi_io_lock);
2844		} else
2845			mfi_disk_enable(ld);
2846		break;
2847	case MFI_DCMD_CFG_CLEAR:
2848		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2849			mtx_unlock(&sc->mfi_io_lock);
2850			mtx_lock(&Giant);
2851			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2852				device_delete_child(sc->mfi_dev, ld->ld_dev);
2853			}
2854			mtx_unlock(&Giant);
2855			mtx_lock(&sc->mfi_io_lock);
2856		} else {
2857			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2858				mfi_disk_enable(ld);
2859		}
2860		break;
2861	case MFI_DCMD_CFG_ADD:
2862		mfi_ldprobe(sc);
2863		break;
2864	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2865		mfi_ldprobe(sc);
2866		break;
2867	case MFI_DCMD_PD_STATE_SET:
2868		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2869		syspd_id = mbox[0];
2870		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2871			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,pd_link) {
2872				if (syspd->pd_id == syspd_id)
2873					break;
2874			}
2875		}
2876		else
2877			break;
2878		/* If the transition fails then enable the syspd again */
2879		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2880			mfi_syspd_enable(syspd);
2881		break;
2882	}
2883}
2884
2885static int
2886mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2887{
2888	struct mfi_config_data *conf_data;
2889	struct mfi_command *ld_cm = NULL;
2890	struct mfi_ld_info *ld_info = NULL;
2891	struct mfi_ld_config *ld;
2892	char *p;
2893	int error = 0;
2894
2895	conf_data = (struct mfi_config_data *)cm->cm_data;
2896
2897	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2898		p = (char *)conf_data->array;
2899		p += conf_data->array_size * conf_data->array_count;
2900		ld = (struct mfi_ld_config *)p;
2901		if (ld->params.isSSCD == 1)
2902			error = 1;
2903	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2904		error = mfi_dcmd_command (sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2905		    (void **)&ld_info, sizeof(*ld_info));
2906		if (error) {
2907			device_printf(sc->mfi_dev, "Failed to allocate"
2908			    "MFI_DCMD_LD_GET_INFO %d", error);
2909			if (ld_info)
2910				free(ld_info, M_MFIBUF);
2911			return 0;
2912		}
2913		ld_cm->cm_flags = MFI_CMD_DATAIN;
2914		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2915		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2916		if (mfi_wait_command(sc, ld_cm) != 0) {
2917			device_printf(sc->mfi_dev, "failed to get log drv\n");
2918			mfi_release_command(ld_cm);
2919			free(ld_info, M_MFIBUF);
2920			return 0;
2921		}
2922
2923		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2924			free(ld_info, M_MFIBUF);
2925			mfi_release_command(ld_cm);
2926			return 0;
2927		}
2928		else
2929			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2930
2931		if (ld_info->ld_config.params.isSSCD == 1)
2932			error = 1;
2933
2934		mfi_release_command(ld_cm);
2935		free(ld_info, M_MFIBUF);
2936
2937	}
2938	return error;
2939}
2940
2941static int
2942mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2943{
2944	uint8_t i;
2945	struct mfi_ioc_packet *ioc;
2946	ioc = (struct mfi_ioc_packet *)arg;
2947	int sge_size, error;
2948	struct megasas_sge *kern_sge;
2949
2950	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2951	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2952	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2953
2954	if (sizeof(bus_addr_t) == 8) {
2955		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2956		cm->cm_extra_frames = 2;
2957		sge_size = sizeof(struct mfi_sg64);
2958	} else {
2959		cm->cm_extra_frames =  (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2960		sge_size = sizeof(struct mfi_sg32);
2961	}
2962
2963	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2964	for (i = 0; i < ioc->mfi_sge_count; i++) {
2965			if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
2966			1, 0,			/* algnmnt, boundary */
2967			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2968			BUS_SPACE_MAXADDR,	/* highaddr */
2969			NULL, NULL,		/* filter, filterarg */
2970			ioc->mfi_sgl[i].iov_len,/* maxsize */
2971			2,			/* nsegments */
2972			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2973			BUS_DMA_ALLOCNOW,	/* flags */
2974			NULL, NULL,		/* lockfunc, lockarg */
2975			&sc->mfi_kbuff_arr_dmat[i])) {
2976			device_printf(sc->mfi_dev,
2977			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2978			return (ENOMEM);
2979		}
2980
2981		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2982		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2983		    &sc->mfi_kbuff_arr_dmamap[i])) {
2984			device_printf(sc->mfi_dev,
2985			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2986			return (ENOMEM);
2987		}
2988
2989		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2990		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2991		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2992		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2993
2994		if (!sc->kbuff_arr[i]) {
2995			device_printf(sc->mfi_dev,
2996			    "Could not allocate memory for kbuff_arr info\n");
2997			return -1;
2998		}
2999		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
3000		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
3001
3002		if (sizeof(bus_addr_t) == 8) {
3003			cm->cm_frame->stp.sgl.sg64[i].addr =
3004			    kern_sge[i].phys_addr;
3005			cm->cm_frame->stp.sgl.sg64[i].len =
3006			    ioc->mfi_sgl[i].iov_len;
3007		} else {
3008			cm->cm_frame->stp.sgl.sg32[i].addr =
3009			    kern_sge[i].phys_addr;
3010			cm->cm_frame->stp.sgl.sg32[i].len =
3011			    ioc->mfi_sgl[i].iov_len;
3012		}
3013
3014		error = copyin(ioc->mfi_sgl[i].iov_base,
3015		    sc->kbuff_arr[i],
3016		    ioc->mfi_sgl[i].iov_len);
3017		if (error != 0) {
3018			device_printf(sc->mfi_dev, "Copy in failed\n");
3019			return error;
3020		}
3021	}
3022
3023	cm->cm_flags |=MFI_CMD_MAPPED;
3024	return 0;
3025}
3026
3027static int
3028mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3029{
3030	struct mfi_command *cm;
3031	struct mfi_dcmd_frame *dcmd;
3032	void *ioc_buf = NULL;
3033	uint32_t context;
3034	int error = 0, locked;
3035
3036
3037	if (ioc->buf_size > 0) {
3038		if (ioc->buf_size > 1024 * 1024)
3039			return (ENOMEM);
3040		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3041		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3042		if (error) {
3043			device_printf(sc->mfi_dev, "failed to copyin\n");
3044			free(ioc_buf, M_MFIBUF);
3045			return (error);
3046		}
3047	}
3048
3049	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3050
3051	mtx_lock(&sc->mfi_io_lock);
3052	while ((cm = mfi_dequeue_free(sc)) == NULL)
3053		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3054
3055	/* Save context for later */
3056	context = cm->cm_frame->header.context;
3057
3058	dcmd = &cm->cm_frame->dcmd;
3059	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3060
3061	cm->cm_sg = &dcmd->sgl;
3062	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3063	cm->cm_data = ioc_buf;
3064	cm->cm_len = ioc->buf_size;
3065
3066	/* restore context */
3067	cm->cm_frame->header.context = context;
3068
3069	/* Cheat since we don't know if we're writing or reading */
3070	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3071
3072	error = mfi_check_command_pre(sc, cm);
3073	if (error)
3074		goto out;
3075
3076	error = mfi_wait_command(sc, cm);
3077	if (error) {
3078		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3079		goto out;
3080	}
3081	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3082	mfi_check_command_post(sc, cm);
3083out:
3084	mfi_release_command(cm);
3085	mtx_unlock(&sc->mfi_io_lock);
3086	mfi_config_unlock(sc, locked);
3087	if (ioc->buf_size > 0)
3088		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3089	if (ioc_buf)
3090		free(ioc_buf, M_MFIBUF);
3091	return (error);
3092}
3093
3094#define	PTRIN(p)		((void *)(uintptr_t)(p))
3095
3096static int
3097mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3098{
3099	struct mfi_softc *sc;
3100	union mfi_statrequest *ms;
3101	struct mfi_ioc_packet *ioc;
3102#ifdef COMPAT_FREEBSD32
3103	struct mfi_ioc_packet32 *ioc32;
3104#endif
3105	struct mfi_ioc_aen *aen;
3106	struct mfi_command *cm = NULL;
3107	uint32_t context = 0;
3108	union mfi_sense_ptr sense_ptr;
3109	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3110	size_t len;
3111	int i, res;
3112	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3113#ifdef COMPAT_FREEBSD32
3114	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3115	struct mfi_ioc_passthru iop_swab;
3116#endif
3117	int error, locked;
3118	union mfi_sgl *sgl;
3119	sc = dev->si_drv1;
3120	error = 0;
3121
3122	if (sc->adpreset)
3123		return EBUSY;
3124
3125	if (sc->hw_crit_error)
3126		return EBUSY;
3127
3128	if (sc->issuepend_done == 0)
3129		return EBUSY;
3130
3131	switch (cmd) {
3132	case MFIIO_STATS:
3133		ms = (union mfi_statrequest *)arg;
3134		switch (ms->ms_item) {
3135		case MFIQ_FREE:
3136		case MFIQ_BIO:
3137		case MFIQ_READY:
3138		case MFIQ_BUSY:
3139			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3140			    sizeof(struct mfi_qstat));
3141			break;
3142		default:
3143			error = ENOIOCTL;
3144			break;
3145		}
3146		break;
3147	case MFIIO_QUERY_DISK:
3148	{
3149		struct mfi_query_disk *qd;
3150		struct mfi_disk *ld;
3151
3152		qd = (struct mfi_query_disk *)arg;
3153		mtx_lock(&sc->mfi_io_lock);
3154		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3155			if (ld->ld_id == qd->array_id)
3156				break;
3157		}
3158		if (ld == NULL) {
3159			qd->present = 0;
3160			mtx_unlock(&sc->mfi_io_lock);
3161			return (0);
3162		}
3163		qd->present = 1;
3164		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3165			qd->open = 1;
3166		bzero(qd->devname, SPECNAMELEN + 1);
3167		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3168		mtx_unlock(&sc->mfi_io_lock);
3169		break;
3170	}
3171	case MFI_CMD:
3172#ifdef COMPAT_FREEBSD32
3173	case MFI_CMD32:
3174#endif
3175		{
3176		devclass_t devclass;
3177		ioc = (struct mfi_ioc_packet *)arg;
3178		int adapter;
3179
3180		adapter = ioc->mfi_adapter_no;
3181		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3182			devclass = devclass_find("mfi");
3183			sc = devclass_get_softc(devclass, adapter);
3184		}
3185		mtx_lock(&sc->mfi_io_lock);
3186		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3187			mtx_unlock(&sc->mfi_io_lock);
3188			return (EBUSY);
3189		}
3190		mtx_unlock(&sc->mfi_io_lock);
3191		locked = 0;
3192
3193		/*
3194		 * save off original context since copying from user
3195		 * will clobber some data
3196		 */
3197		context = cm->cm_frame->header.context;
3198		cm->cm_frame->header.context = cm->cm_index;
3199
3200		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3201		    2 * MEGAMFI_FRAME_SIZE);
3202		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3203		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3204		cm->cm_frame->header.scsi_status = 0;
3205		cm->cm_frame->header.pad0 = 0;
3206		if (ioc->mfi_sge_count) {
3207			cm->cm_sg =
3208			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3209		}
3210		sgl = cm->cm_sg;
3211		cm->cm_flags = 0;
3212		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3213			cm->cm_flags |= MFI_CMD_DATAIN;
3214		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3215			cm->cm_flags |= MFI_CMD_DATAOUT;
3216		/* Legacy app shim */
3217		if (cm->cm_flags == 0)
3218			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3219		cm->cm_len = cm->cm_frame->header.data_len;
3220		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3221#ifdef COMPAT_FREEBSD32
3222			if (cmd == MFI_CMD) {
3223#endif
3224				/* Native */
3225				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3226#ifdef COMPAT_FREEBSD32
3227			} else {
3228				/* 32bit on 64bit */
3229				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3230				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3231			}
3232#endif
3233			cm->cm_len += cm->cm_stp_len;
3234		}
3235		if (cm->cm_len &&
3236		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3237			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3238			    M_WAITOK | M_ZERO);
3239			if (cm->cm_data == NULL) {
3240				device_printf(sc->mfi_dev, "Malloc failed\n");
3241				goto out;
3242			}
3243		} else {
3244			cm->cm_data = 0;
3245		}
3246
3247		/* restore header context */
3248		cm->cm_frame->header.context = context;
3249
3250		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3251			res = mfi_stp_cmd(sc, cm, arg);
3252			if (res != 0)
3253				goto out;
3254		} else {
3255			temp = data;
3256			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3257			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3258				for (i = 0; i < ioc->mfi_sge_count; i++) {
3259#ifdef COMPAT_FREEBSD32
3260					if (cmd == MFI_CMD) {
3261#endif
3262						/* Native */
3263						addr = ioc->mfi_sgl[i].iov_base;
3264						len = ioc->mfi_sgl[i].iov_len;
3265#ifdef COMPAT_FREEBSD32
3266					} else {
3267						/* 32bit on 64bit */
3268						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3269						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3270						len = ioc32->mfi_sgl[i].iov_len;
3271					}
3272#endif
3273					error = copyin(addr, temp, len);
3274					if (error != 0) {
3275						device_printf(sc->mfi_dev,
3276						    "Copy in failed\n");
3277						goto out;
3278					}
3279					temp = &temp[len];
3280				}
3281			}
3282		}
3283
3284		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3285			locked = mfi_config_lock(sc,
3286			     cm->cm_frame->dcmd.opcode);
3287
3288		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3289			cm->cm_frame->pass.sense_addr_lo =
3290			    (uint32_t)cm->cm_sense_busaddr;
3291			cm->cm_frame->pass.sense_addr_hi =
3292			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3293		}
3294		mtx_lock(&sc->mfi_io_lock);
3295		skip_pre_post = mfi_check_for_sscd (sc, cm);
3296		if (!skip_pre_post) {
3297			error = mfi_check_command_pre(sc, cm);
3298			if (error) {
3299				mtx_unlock(&sc->mfi_io_lock);
3300				goto out;
3301			}
3302		}
3303		if ((error = mfi_wait_command(sc, cm)) != 0) {
3304			device_printf(sc->mfi_dev,
3305			    "Controller polled failed\n");
3306			mtx_unlock(&sc->mfi_io_lock);
3307			goto out;
3308		}
3309		if (!skip_pre_post) {
3310			mfi_check_command_post(sc, cm);
3311		}
3312		mtx_unlock(&sc->mfi_io_lock);
3313
3314		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3315			temp = data;
3316			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3317			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3318				for (i = 0; i < ioc->mfi_sge_count; i++) {
3319#ifdef COMPAT_FREEBSD32
3320					if (cmd == MFI_CMD) {
3321#endif
3322						/* Native */
3323						addr = ioc->mfi_sgl[i].iov_base;
3324						len = ioc->mfi_sgl[i].iov_len;
3325#ifdef COMPAT_FREEBSD32
3326					} else {
3327						/* 32bit on 64bit */
3328						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3329						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3330						len = ioc32->mfi_sgl[i].iov_len;
3331					}
3332#endif
3333					error = copyout(temp, addr, len);
3334					if (error != 0) {
3335						device_printf(sc->mfi_dev,
3336						    "Copy out failed\n");
3337						goto out;
3338					}
3339					temp = &temp[len];
3340				}
3341			}
3342		}
3343
3344		if (ioc->mfi_sense_len) {
3345			/* get user-space sense ptr then copy out sense */
3346			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3347			    &sense_ptr.sense_ptr_data[0],
3348			    sizeof(sense_ptr.sense_ptr_data));
3349#ifdef COMPAT_FREEBSD32
3350			if (cmd != MFI_CMD) {
3351				/*
3352				 * not 64bit native so zero out any address
3353				 * over 32bit */
3354				sense_ptr.addr.high = 0;
3355			}
3356#endif
3357			error = copyout(cm->cm_sense, sense_ptr.user_space,
3358			    ioc->mfi_sense_len);
3359			if (error != 0) {
3360				device_printf(sc->mfi_dev,
3361				    "Copy out failed\n");
3362				goto out;
3363			}
3364		}
3365
3366		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3367out:
3368		mfi_config_unlock(sc, locked);
3369		if (data)
3370			free(data, M_MFIBUF);
3371		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3372			for (i = 0; i < 2; i++) {
3373				if (sc->kbuff_arr[i]) {
3374					if (sc->mfi_kbuff_arr_busaddr != 0)
3375						bus_dmamap_unload(
3376						    sc->mfi_kbuff_arr_dmat[i],
3377						    sc->mfi_kbuff_arr_dmamap[i]
3378						    );
3379					if (sc->kbuff_arr[i] != NULL)
3380						bus_dmamem_free(
3381						    sc->mfi_kbuff_arr_dmat[i],
3382						    sc->kbuff_arr[i],
3383						    sc->mfi_kbuff_arr_dmamap[i]
3384						    );
3385					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3386						bus_dma_tag_destroy(
3387						    sc->mfi_kbuff_arr_dmat[i]);
3388				}
3389			}
3390		}
3391		if (cm) {
3392			mtx_lock(&sc->mfi_io_lock);
3393			mfi_release_command(cm);
3394			mtx_unlock(&sc->mfi_io_lock);
3395		}
3396
3397		break;
3398		}
3399	case MFI_SET_AEN:
3400		aen = (struct mfi_ioc_aen *)arg;
3401		mtx_lock(&sc->mfi_io_lock);
3402		error = mfi_aen_register(sc, aen->aen_seq_num,
3403		    aen->aen_class_locale);
3404		mtx_unlock(&sc->mfi_io_lock);
3405
3406		break;
3407	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3408		{
3409			devclass_t devclass;
3410			struct mfi_linux_ioc_packet l_ioc;
3411			int adapter;
3412
3413			devclass = devclass_find("mfi");
3414			if (devclass == NULL)
3415				return (ENOENT);
3416
3417			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3418			if (error)
3419				return (error);
3420			adapter = l_ioc.lioc_adapter_no;
3421			sc = devclass_get_softc(devclass, adapter);
3422			if (sc == NULL)
3423				return (ENOENT);
3424			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3425			    cmd, arg, flag, td));
3426			break;
3427		}
3428	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3429		{
3430			devclass_t devclass;
3431			struct mfi_linux_ioc_aen l_aen;
3432			int adapter;
3433
3434			devclass = devclass_find("mfi");
3435			if (devclass == NULL)
3436				return (ENOENT);
3437
3438			error = copyin(arg, &l_aen, sizeof(l_aen));
3439			if (error)
3440				return (error);
3441			adapter = l_aen.laen_adapter_no;
3442			sc = devclass_get_softc(devclass, adapter);
3443			if (sc == NULL)
3444				return (ENOENT);
3445			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3446			    cmd, arg, flag, td));
3447			break;
3448		}
3449#ifdef COMPAT_FREEBSD32
3450	case MFIIO_PASSTHRU32:
3451		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3452			error = ENOTTY;
3453			break;
3454		}
3455		iop_swab.ioc_frame	= iop32->ioc_frame;
3456		iop_swab.buf_size	= iop32->buf_size;
3457		iop_swab.buf		= PTRIN(iop32->buf);
3458		iop			= &iop_swab;
3459		/* FALLTHROUGH */
3460#endif
3461	case MFIIO_PASSTHRU:
3462		error = mfi_user_command(sc, iop);
3463#ifdef COMPAT_FREEBSD32
3464		if (cmd == MFIIO_PASSTHRU32)
3465			iop32->ioc_frame = iop_swab.ioc_frame;
3466#endif
3467		break;
3468	default:
3469		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3470		error = ENOTTY;
3471		break;
3472	}
3473
3474	return (error);
3475}
3476
3477static int
3478mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3479{
3480	struct mfi_softc *sc;
3481	struct mfi_linux_ioc_packet l_ioc;
3482	struct mfi_linux_ioc_aen l_aen;
3483	struct mfi_command *cm = NULL;
3484	struct mfi_aen *mfi_aen_entry;
3485	union mfi_sense_ptr sense_ptr;
3486	uint32_t context = 0;
3487	uint8_t *data = NULL, *temp;
3488	int i;
3489	int error, locked;
3490
3491	sc = dev->si_drv1;
3492	error = 0;
3493	switch (cmd) {
3494	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3495		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3496		if (error != 0)
3497			return (error);
3498
3499		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3500			return (EINVAL);
3501		}
3502
3503		mtx_lock(&sc->mfi_io_lock);
3504		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3505			mtx_unlock(&sc->mfi_io_lock);
3506			return (EBUSY);
3507		}
3508		mtx_unlock(&sc->mfi_io_lock);
3509		locked = 0;
3510
3511		/*
3512		 * save off original context since copying from user
3513		 * will clobber some data
3514		 */
3515		context = cm->cm_frame->header.context;
3516
3517		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3518		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3519		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3520		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3521		cm->cm_frame->header.scsi_status = 0;
3522		cm->cm_frame->header.pad0 = 0;
3523		if (l_ioc.lioc_sge_count)
3524			cm->cm_sg =
3525			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3526		cm->cm_flags = 0;
3527		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3528			cm->cm_flags |= MFI_CMD_DATAIN;
3529		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3530			cm->cm_flags |= MFI_CMD_DATAOUT;
3531		cm->cm_len = cm->cm_frame->header.data_len;
3532		if (cm->cm_len &&
3533		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3534			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3535			    M_WAITOK | M_ZERO);
3536			if (cm->cm_data == NULL) {
3537				device_printf(sc->mfi_dev, "Malloc failed\n");
3538				goto out;
3539			}
3540		} else {
3541			cm->cm_data = 0;
3542		}
3543
3544		/* restore header context */
3545		cm->cm_frame->header.context = context;
3546
3547		temp = data;
3548		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3549			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3550				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3551				       temp,
3552				       l_ioc.lioc_sgl[i].iov_len);
3553				if (error != 0) {
3554					device_printf(sc->mfi_dev,
3555					    "Copy in failed\n");
3556					goto out;
3557				}
3558				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3559			}
3560		}
3561
3562		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3563			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3564
3565		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3566			cm->cm_frame->pass.sense_addr_lo =
3567			    (uint32_t)cm->cm_sense_busaddr;
3568			cm->cm_frame->pass.sense_addr_hi =
3569			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3570		}
3571
3572		mtx_lock(&sc->mfi_io_lock);
3573		error = mfi_check_command_pre(sc, cm);
3574		if (error) {
3575			mtx_unlock(&sc->mfi_io_lock);
3576			goto out;
3577		}
3578
3579		if ((error = mfi_wait_command(sc, cm)) != 0) {
3580			device_printf(sc->mfi_dev,
3581			    "Controller polled failed\n");
3582			mtx_unlock(&sc->mfi_io_lock);
3583			goto out;
3584		}
3585
3586		mfi_check_command_post(sc, cm);
3587		mtx_unlock(&sc->mfi_io_lock);
3588
3589		temp = data;
3590		if (cm->cm_flags & MFI_CMD_DATAIN) {
3591			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3592				error = copyout(temp,
3593					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3594					l_ioc.lioc_sgl[i].iov_len);
3595				if (error != 0) {
3596					device_printf(sc->mfi_dev,
3597					    "Copy out failed\n");
3598					goto out;
3599				}
3600				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3601			}
3602		}
3603
3604		if (l_ioc.lioc_sense_len) {
3605			/* get user-space sense ptr then copy out sense */
3606			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3607                            ->lioc_frame.raw[l_ioc.lioc_sense_off],
3608			    &sense_ptr.sense_ptr_data[0],
3609			    sizeof(sense_ptr.sense_ptr_data));
3610#ifdef __amd64__
3611			/*
3612			 * only 32bit Linux support so zero out any
3613			 * address over 32bit
3614			 */
3615			sense_ptr.addr.high = 0;
3616#endif
3617			error = copyout(cm->cm_sense, sense_ptr.user_space,
3618			    l_ioc.lioc_sense_len);
3619			if (error != 0) {
3620				device_printf(sc->mfi_dev,
3621				    "Copy out failed\n");
3622				goto out;
3623			}
3624		}
3625
3626		error = copyout(&cm->cm_frame->header.cmd_status,
3627			&((struct mfi_linux_ioc_packet*)arg)
3628			->lioc_frame.hdr.cmd_status,
3629			1);
3630		if (error != 0) {
3631			device_printf(sc->mfi_dev,
3632				      "Copy out failed\n");
3633			goto out;
3634		}
3635
3636out:
3637		mfi_config_unlock(sc, locked);
3638		if (data)
3639			free(data, M_MFIBUF);
3640		if (cm) {
3641			mtx_lock(&sc->mfi_io_lock);
3642			mfi_release_command(cm);
3643			mtx_unlock(&sc->mfi_io_lock);
3644		}
3645
3646		return (error);
3647	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3648		error = copyin(arg, &l_aen, sizeof(l_aen));
3649		if (error != 0)
3650			return (error);
3651		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3652		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3653		    M_WAITOK);
3654		mtx_lock(&sc->mfi_io_lock);
3655		if (mfi_aen_entry != NULL) {
3656			mfi_aen_entry->p = curproc;
3657			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3658			    aen_link);
3659		}
3660		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3661		    l_aen.laen_class_locale);
3662
3663		if (error != 0) {
3664			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3665			    aen_link);
3666			free(mfi_aen_entry, M_MFIBUF);
3667		}
3668		mtx_unlock(&sc->mfi_io_lock);
3669
3670		return (error);
3671	default:
3672		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3673		error = ENOENT;
3674		break;
3675	}
3676
3677	return (error);
3678}
3679
3680static int
3681mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3682{
3683	struct mfi_softc *sc;
3684	int revents = 0;
3685
3686	sc = dev->si_drv1;
3687
3688	if (poll_events & (POLLIN | POLLRDNORM)) {
3689		if (sc->mfi_aen_triggered != 0) {
3690			revents |= poll_events & (POLLIN | POLLRDNORM);
3691			sc->mfi_aen_triggered = 0;
3692		}
3693		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3694			revents |= POLLERR;
3695		}
3696	}
3697
3698	if (revents == 0) {
3699		if (poll_events & (POLLIN | POLLRDNORM)) {
3700			sc->mfi_poll_waiting = 1;
3701			selrecord(td, &sc->mfi_select);
3702		}
3703	}
3704
3705	return revents;
3706}
3707
3708static void
3709mfi_dump_all(void)
3710{
3711	struct mfi_softc *sc;
3712	struct mfi_command *cm;
3713	devclass_t dc;
3714	time_t deadline;
3715	int timedout;
3716	int i;
3717
3718	dc = devclass_find("mfi");
3719	if (dc == NULL) {
3720		printf("No mfi dev class\n");
3721		return;
3722	}
3723
3724	for (i = 0; ; i++) {
3725		sc = devclass_get_softc(dc, i);
3726		if (sc == NULL)
3727			break;
3728		device_printf(sc->mfi_dev, "Dumping\n\n");
3729		timedout = 0;
3730		deadline = time_uptime - mfi_cmd_timeout;
3731		mtx_lock(&sc->mfi_io_lock);
3732		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3733			if (cm->cm_timestamp <= deadline) {
3734				device_printf(sc->mfi_dev,
3735				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3736				    cm, (int)(time_uptime - cm->cm_timestamp));
3737				MFI_PRINT_CMD(cm);
3738				timedout++;
3739			}
3740		}
3741
3742#if 0
3743		if (timedout)
3744			MFI_DUMP_CMDS(sc);
3745#endif
3746
3747		mtx_unlock(&sc->mfi_io_lock);
3748	}
3749
3750	return;
3751}
3752
3753static void
3754mfi_timeout(void *data)
3755{
3756	struct mfi_softc *sc = (struct mfi_softc *)data;
3757	struct mfi_command *cm, *tmp;
3758	time_t deadline;
3759	int timedout = 0;
3760
3761	deadline = time_uptime - mfi_cmd_timeout;
3762	if (sc->adpreset == 0) {
3763		if (!mfi_tbolt_reset(sc)) {
3764			callout_reset(&sc->mfi_watchdog_callout,
3765			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3766			return;
3767		}
3768	}
3769	mtx_lock(&sc->mfi_io_lock);
3770	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3771		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3772			continue;
3773		if (cm->cm_timestamp <= deadline) {
3774			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3775				cm->cm_timestamp = time_uptime;
3776			} else {
3777				device_printf(sc->mfi_dev,
3778				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3779				     cm, (int)(time_uptime - cm->cm_timestamp)
3780				     );
3781				MFI_PRINT_CMD(cm);
3782				MFI_VALIDATE_CMD(sc, cm);
3783				/*
3784				 * While commands can get stuck forever we do
3785				 * not fail them as there is no way to tell if
3786				 * the controller has actually processed them
3787				 * or not.
3788				 *
3789				 * In addition its very likely that force
3790				 * failing a command here would cause a panic
3791				 * e.g. in UFS.
3792				 */
3793				timedout++;
3794			}
3795		}
3796	}
3797
3798#if 0
3799	if (timedout)
3800		MFI_DUMP_CMDS(sc);
3801#endif
3802
3803	mtx_unlock(&sc->mfi_io_lock);
3804
3805	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3806	    mfi_timeout, sc);
3807
3808	if (0)
3809		mfi_dump_all();
3810	return;
3811}
3812