mfi.c: changes between revisions 233711 and 235014 (lines marked '-' were removed in r235014, lines marked '+' were added)
1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26/*-
27 * Copyright (c) 2007 LSI Corp.
28 * Copyright (c) 2007 Rajesh Prabhakaran.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 */
52
53#include <sys/cdefs.h>
- 54__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 233711 2012-03-30 23:05:48Z ambrisko $");
+ 54__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 235014 2012-05-04 16:00:39Z ambrisko $");
55
56#include "opt_compat.h"
57#include "opt_mfi.h"
58
59#include <sys/param.h>
60#include <sys/systm.h>
61#include <sys/sysctl.h>
62#include <sys/malloc.h>
63#include <sys/kernel.h>
64#include <sys/poll.h>
65#include <sys/selinfo.h>
66#include <sys/bus.h>
67#include <sys/conf.h>
68#include <sys/eventhandler.h>
69#include <sys/rman.h>
70#include <sys/bus_dma.h>
71#include <sys/bio.h>
72#include <sys/ioccom.h>
73#include <sys/uio.h>
74#include <sys/proc.h>
75#include <sys/signalvar.h>
76#include <sys/taskqueue.h>
77
78#include <machine/bus.h>
79#include <machine/resource.h>
80
81#include <dev/mfi/mfireg.h>
82#include <dev/mfi/mfi_ioctl.h>
83#include <dev/mfi/mfivar.h>
84#include <sys/interrupt.h>
85#include <sys/priority.h>
86
87static int mfi_alloc_commands(struct mfi_softc *);
88static int mfi_comms_init(struct mfi_softc *);
89static int mfi_get_controller_info(struct mfi_softc *);
90static int mfi_get_log_state(struct mfi_softc *,
91 struct mfi_evt_log_state **);
92static int mfi_parse_entries(struct mfi_softc *, int, int);
- 93static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
- 94 		    uint32_t, void **, size_t);
93static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
94static void mfi_startup(void *arg);
95static void mfi_intr(void *arg);
96static void mfi_ldprobe(struct mfi_softc *sc);
97static void mfi_syspdprobe(struct mfi_softc *sc);
98static void mfi_handle_evt(void *context, int pending);
99static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
100static void mfi_aen_complete(struct mfi_command *);
101static int mfi_add_ld(struct mfi_softc *sc, int);
102static void mfi_add_ld_complete(struct mfi_command *);
103static int mfi_add_sys_pd(struct mfi_softc *sc, int);
104static void mfi_add_sys_pd_complete(struct mfi_command *);
105static struct mfi_command * mfi_bio_command(struct mfi_softc *);
106static void mfi_bio_complete(struct mfi_command *);
107static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
108static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
109static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
110static int mfi_abort(struct mfi_softc *, struct mfi_command *);
111static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
112static void mfi_timeout(void *);
113static int mfi_user_command(struct mfi_softc *,
114 struct mfi_ioc_passthru *);
115static void mfi_enable_intr_xscale(struct mfi_softc *sc);
116static void mfi_enable_intr_ppc(struct mfi_softc *sc);
117static int32_t mfi_read_fw_status_xscale(struct mfi_softc *sc);
118static int32_t mfi_read_fw_status_ppc(struct mfi_softc *sc);
119static int mfi_check_clear_intr_xscale(struct mfi_softc *sc);
120static int mfi_check_clear_intr_ppc(struct mfi_softc *sc);
121static void mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
122 uint32_t frame_cnt);
123static void mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
124 uint32_t frame_cnt);
125static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
126static void mfi_config_unlock(struct mfi_softc *sc, int locked);
127static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
128static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
129static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
130
131SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
132static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
133TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
134SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
135 0, "event message locale");
136
137static int mfi_event_class = MFI_EVT_CLASS_INFO;
138TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
139SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
140 0, "event message class");
141
142static int mfi_max_cmds = 128;
143TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
144SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
145 0, "Max commands");
146
147static int mfi_detect_jbod_change = 1;
148TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
149SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
150 &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
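/*
 * These are standard FreeBSD knobs: the TUNABLE_INT values can be set from
 * loader.conf(5) (e.g. hw.mfi.max_cmds=64) and are picked up at boot, while
 * the CTLFLAG_RW oids (event_locale, event_class, detect_jbod_change) may
 * also be changed at runtime with sysctl(8); max_cmds is CTLFLAG_RD and so
 * is boot-time only.
 */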
151
152/* Management interface */
153static d_open_t mfi_open;
154static d_close_t mfi_close;
155static d_ioctl_t mfi_ioctl;
156static d_poll_t mfi_poll;
157
158static struct cdevsw mfi_cdevsw = {
159 .d_version = D_VERSION,
160 .d_flags = 0,
161 .d_open = mfi_open,
162 .d_close = mfi_close,
163 .d_ioctl = mfi_ioctl,
164 .d_poll = mfi_poll,
165 .d_name = "mfi",
166};
167
168MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
169
170#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
171struct mfi_skinny_dma_info mfi_skinny;
172
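/*
 * Interrupt enable/ack, firmware status reads, and command issue are
 * abstracted behind per-family methods (xscale vs. ppc/gen2/skinny);
 * mfi_attach() wires up the right set based on sc->mfi_flags, since the
 * register offsets and mask bits differ between controller families.
 */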
173static void
174mfi_enable_intr_xscale(struct mfi_softc *sc)
175{
176 MFI_WRITE4(sc, MFI_OMSK, 0x01);
177}
178
179static void
180mfi_enable_intr_ppc(struct mfi_softc *sc)
181{
182 if (sc->mfi_flags & MFI_FLAGS_1078) {
183 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
184 MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
185 }
186 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
187 MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
188 MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
189 }
190 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
191 MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
192 }
193}
194
195static int32_t
196mfi_read_fw_status_xscale(struct mfi_softc *sc)
197{
198 return MFI_READ4(sc, MFI_OMSG0);
199}
200
201static int32_t
202mfi_read_fw_status_ppc(struct mfi_softc *sc)
203{
204 return MFI_READ4(sc, MFI_OSP0);
205}
206
207static int
208mfi_check_clear_intr_xscale(struct mfi_softc *sc)
209{
210 int32_t status;
211
212 status = MFI_READ4(sc, MFI_OSTS);
213 if ((status & MFI_OSTS_INTR_VALID) == 0)
214 return 1;
215
216 MFI_WRITE4(sc, MFI_OSTS, status);
217 return 0;
218}
219
220static int
221mfi_check_clear_intr_ppc(struct mfi_softc *sc)
222{
223 int32_t status;
224
225 status = MFI_READ4(sc, MFI_OSTS);
226 if (sc->mfi_flags & MFI_FLAGS_1078) {
227 if (!(status & MFI_1078_RM)) {
228 return 1;
229 }
230 }
231 else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
232 if (!(status & MFI_GEN2_RM)) {
233 return 1;
234 }
235 }
236 else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
237 if (!(status & MFI_SKINNY_RM)) {
238 return 1;
239 }
240 }
241 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 MFI_WRITE4(sc, MFI_OSTS, status);
243 else
244 MFI_WRITE4(sc, MFI_ODCR0, status);
245 return 0;
246}
247
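/*
 * Command frames are 64-byte aligned, so the low address bits are free for
 * metadata: xscale writes (addr >> 3) | frame_cnt to its inbound queue
 * port, while the ppc/skinny ports take addr | (frame_cnt << 1) | 1.
 * Skinny controllers use a 64-bit queue port pair (IQPL/IQPH).
 */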
248static void
249mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
250{
251 MFI_WRITE4(sc, MFI_IQP,(bus_add >>3)|frame_cnt);
252}
253
254static void
255mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
256{
257 if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt <<1)|1 );
259 MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
260 } else {
261 MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt <<1)|1 );
262 }
263}
264
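/*
 * Nudge the firmware out of whatever boot state it is in and poll until it
 * reports READY.  Each state is given up to MFI_RESET_WAIT_TIME seconds,
 * polled in 100ms steps; a FAULT, an unknown state, or a state that never
 * changes fails the attach with ENXIO.
 */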
265int
266mfi_transition_firmware(struct mfi_softc *sc)
267{
268 uint32_t fw_state, cur_state;
269 int max_wait, i;
270 uint32_t cur_abs_reg_val = 0;
271 uint32_t prev_abs_reg_val = 0;
272
273 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 while (fw_state != MFI_FWSTATE_READY) {
276 if (bootverbose)
277 device_printf(sc->mfi_dev, "Waiting for firmware to "
278 "become ready\n");
279 cur_state = fw_state;
280 switch (fw_state) {
281 case MFI_FWSTATE_FAULT:
282 device_printf(sc->mfi_dev, "Firmware fault\n");
283 return (ENXIO);
284 case MFI_FWSTATE_WAIT_HANDSHAKE:
285 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
286 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
287 else
288 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 max_wait = MFI_RESET_WAIT_TIME;
290 break;
291 case MFI_FWSTATE_OPERATIONAL:
292 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
293 MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
294 else
295 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 max_wait = MFI_RESET_WAIT_TIME;
297 break;
298 case MFI_FWSTATE_UNDEFINED:
299 case MFI_FWSTATE_BB_INIT:
300 max_wait = MFI_RESET_WAIT_TIME;
301 break;
302 case MFI_FWSTATE_FW_INIT_2:
303 max_wait = MFI_RESET_WAIT_TIME;
304 break;
305 case MFI_FWSTATE_FW_INIT:
306 case MFI_FWSTATE_FLUSH_CACHE:
307 max_wait = MFI_RESET_WAIT_TIME;
308 break;
309 case MFI_FWSTATE_DEVICE_SCAN:
310 max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
311 prev_abs_reg_val = cur_abs_reg_val;
312 break;
313 case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
314 if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
315 MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
316 else
317 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 max_wait = MFI_RESET_WAIT_TIME;
319 break;
320 default:
321 device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
322 fw_state);
323 return (ENXIO);
324 }
325 for (i = 0; i < (max_wait * 10); i++) {
326 cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 if (fw_state == cur_state)
329 DELAY(100000);
330 else
331 break;
332 }
333 if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 /* Check the device scanning progress */
335 if (prev_abs_reg_val != cur_abs_reg_val) {
336 continue;
337 }
338 }
339 if (fw_state == cur_state) {
340 device_printf(sc->mfi_dev, "Firmware stuck in state "
341 "%#x\n", fw_state);
342 return (ENXIO);
343 }
344 }
345 return (0);
346}
347
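/*
 * bus_dmamap_load() callback for single-segment loads: it simply stores the
 * segment's bus address through the caller's pointer.
 */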
348static void
349mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
350{
351 bus_addr_t *addr;
352
353 addr = arg;
354 *addr = segs[0].ds_addr;
355}
356
357
358int
359mfi_attach(struct mfi_softc *sc)
360{
361 uint32_t status;
362 int error, commsz, framessz, sensesz;
363 int frames, unit, max_fw_sge;
364 uint32_t tb_mem_size = 0;
365
366 if (sc == NULL)
367 return EINVAL;
368
369 device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
370 MEGASAS_VERSION);
371
372 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
373 sx_init(&sc->mfi_config_lock, "MFI config");
374 TAILQ_INIT(&sc->mfi_ld_tqh);
375 TAILQ_INIT(&sc->mfi_syspd_tqh);
376 TAILQ_INIT(&sc->mfi_evt_queue);
377 TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
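	/*
	 * ThunderBolt controllers also keep the firmware's device map in
	 * sync from a deferred task (mfi_handle_map_sync); the initial sync
	 * is kicked off by mfi_tbolt_sync_map_info() at the end of
	 * mfi_attach().
	 */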
+ 378	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
379 TAILQ_INIT(&sc->mfi_aen_pids);
380 TAILQ_INIT(&sc->mfi_cam_ccbq);
381
382 mfi_initq_free(sc);
383 mfi_initq_ready(sc);
384 mfi_initq_busy(sc);
385 mfi_initq_bio(sc);
386
387 sc->adpreset = 0;
388 sc->last_seq_num = 0;
389 sc->disableOnlineCtrlReset = 1;
390 sc->issuepend_done = 1;
391 sc->hw_crit_error = 0;
392
393 if (sc->mfi_flags & MFI_FLAGS_1064R) {
394 sc->mfi_enable_intr = mfi_enable_intr_xscale;
395 sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
396 sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
397 sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
398 } else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
399 sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
400 sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
401 sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
402 sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
403 sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
404 sc->mfi_adp_reset = mfi_tbolt_adp_reset;
405 sc->mfi_tbolt = 1;
406 TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
407 } else {
408 sc->mfi_enable_intr = mfi_enable_intr_ppc;
409 sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
410 sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
411 sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
412 }
413
414
415 /* Before we get too far, see if the firmware is working */
416 if ((error = mfi_transition_firmware(sc)) != 0) {
417 device_printf(sc->mfi_dev, "Firmware not in READY state, "
418 "error %d\n", error);
419 return (ENXIO);
420 }
421
422 /* Start: LSIP200113393 */
423 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
424 1, 0, /* algnmnt, boundary */
425 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
426 BUS_SPACE_MAXADDR, /* highaddr */
427 NULL, NULL, /* filter, filterarg */
428 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsize */
 429				1,			/* nsegments */
430 MEGASAS_MAX_NAME*sizeof(bus_addr_t), /* maxsegsize */
431 0, /* flags */
432 NULL, NULL, /* lockfunc, lockarg */
433 &sc->verbuf_h_dmat)) {
434 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
435 return (ENOMEM);
436 }
437 if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
438 BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
439 device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
440 return (ENOMEM);
441 }
442 bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
443 bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
444 sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
445 mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
446 /* End: LSIP200113393 */
447
448 /*
449 * Get information needed for sizing the contiguous memory for the
450 * frame pool. Size down the sgl parameter since we know that
451 * we will never need more than what's required for MAXPHYS.
452 * It would be nice if these constants were available at runtime
453 * instead of compile time.
454 */
455 status = sc->mfi_read_fw_status(sc);
456 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
457 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
458 sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
459
 460	/* ThunderBolt support: get the contiguous memory */
461
462 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
463 mfi_tbolt_init_globals(sc);
464 device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
465 sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
466 tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
467
468 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
469 1, 0, /* algnmnt, boundary */
470 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
471 BUS_SPACE_MAXADDR, /* highaddr */
472 NULL, NULL, /* filter, filterarg */
473 tb_mem_size, /* maxsize */
 474				1,			/* nsegments */
475 tb_mem_size, /* maxsegsize */
476 0, /* flags */
477 NULL, NULL, /* lockfunc, lockarg */
478 &sc->mfi_tb_dmat)) {
479 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
480 return (ENOMEM);
481 }
482 if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
483 BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
484 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
485 return (ENOMEM);
486 }
487 bzero(sc->request_message_pool, tb_mem_size);
488 bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
489 sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
490
491 /* For ThunderBolt memory init */
492 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
 493				0x100, 0,		/* algnmnt, boundary */
494 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
495 BUS_SPACE_MAXADDR, /* highaddr */
496 NULL, NULL, /* filter, filterarg */
497 MFI_FRAME_SIZE, /* maxsize */
 498				1,			/* nsegments */
499 MFI_FRAME_SIZE, /* maxsegsize */
500 0, /* flags */
501 NULL, NULL, /* lockfunc, lockarg */
502 &sc->mfi_tb_init_dmat)) {
503 device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
504 return (ENOMEM);
505 }
506 if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
507 BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
508 device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
509 return (ENOMEM);
510 }
511 bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
512 bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
513 sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
514 &sc->mfi_tb_init_busaddr, 0);
515 if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
516 tb_mem_size)) {
517 device_printf(sc->mfi_dev,
518 "Thunderbolt pool preparation error\n");
 519			return (ENOMEM);
520 }
521
522 /*
 523	  Allocate DMA memory for the MPI2 IOC INIT request descriptor;
 524	  it is kept separate from the memory allocated for the request
 525	  and reply descriptors to avoid confusion later.
526 */
527 tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
528 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
529 1, 0, /* algnmnt, boundary */
530 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
531 BUS_SPACE_MAXADDR, /* highaddr */
532 NULL, NULL, /* filter, filterarg */
533 tb_mem_size, /* maxsize */
 534				1,			/* nsegments */
535 tb_mem_size, /* maxsegsize */
536 0, /* flags */
537 NULL, NULL, /* lockfunc, lockarg */
538 &sc->mfi_tb_ioc_init_dmat)) {
539 device_printf(sc->mfi_dev,
540 "Cannot allocate comms DMA tag\n");
541 return (ENOMEM);
542 }
543 if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
544 (void **)&sc->mfi_tb_ioc_init_desc,
545 BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
546 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
547 return (ENOMEM);
548 }
549 bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
550 bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
551 sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
552 &sc->mfi_tb_ioc_init_busaddr, 0);
553 }
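	/*
	 * A ThunderBolt controller now has three DMA areas wired up: the
	 * request/reply message pool, a 256-byte-aligned init frame, and a
	 * separate buffer for the MPI2 IOC INIT request descriptor.
	 */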
554 /*
555 * Create the dma tag for data buffers. Used both for block I/O
556 * and for various internal data queries.
557 */
558 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
559 1, 0, /* algnmnt, boundary */
560 BUS_SPACE_MAXADDR, /* lowaddr */
561 BUS_SPACE_MAXADDR, /* highaddr */
562 NULL, NULL, /* filter, filterarg */
563 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
564 sc->mfi_max_sge, /* nsegments */
565 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
566 BUS_DMA_ALLOCNOW, /* flags */
567 busdma_lock_mutex, /* lockfunc */
568 &sc->mfi_io_lock, /* lockfuncarg */
569 &sc->mfi_buffer_dmat)) {
570 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
571 return (ENOMEM);
572 }
573
574 /*
575 * Allocate DMA memory for the comms queues. Keep it under 4GB for
576 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
 577	 * entry, so the calculated size here will be 1 more than
578 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
579 */
580 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
581 sizeof(struct mfi_hwcomms);
582 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
583 1, 0, /* algnmnt, boundary */
584 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
585 BUS_SPACE_MAXADDR, /* highaddr */
586 NULL, NULL, /* filter, filterarg */
587 commsz, /* maxsize */
 588				1,			/* nsegments */
589 commsz, /* maxsegsize */
590 0, /* flags */
591 NULL, NULL, /* lockfunc, lockarg */
592 &sc->mfi_comms_dmat)) {
593 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
594 return (ENOMEM);
595 }
596 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
597 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
598 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
599 return (ENOMEM);
600 }
601 bzero(sc->mfi_comms, commsz);
602 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
603 sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
604 /*
605 * Allocate DMA memory for the command frames. Keep them in the
606 * lower 4GB for efficiency. Calculate the size of the commands at
607 * the same time; each command is one 64 byte frame plus a set of
608 * additional frames for holding sg lists or other data.
609 * The assumption here is that the SG list will start at the second
610 * frame and not use the unused bytes in the first frame. While this
611 * isn't technically correct, it simplifies the calculation and allows
612 * for command frames that might be larger than an mfi_io_frame.
613 */
614 if (sizeof(bus_addr_t) == 8) {
615 sc->mfi_sge_size = sizeof(struct mfi_sg64);
616 sc->mfi_flags |= MFI_FLAGS_SG64;
617 } else {
618 sc->mfi_sge_size = sizeof(struct mfi_sg32);
619 }
620 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
621 sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
622 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
623 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
624 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
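	/*
	 * Worked example (assuming a 12-byte struct mfi_sg64 and 33 SGEs,
	 * i.e. a 128K MAXPHYS with 4K pages): frames = (12*33 - 1)/64 + 2
	 * = 8, so each command reserves 8 * 64 = 512 bytes of frame memory.
	 */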
625 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
626 64, 0, /* algnmnt, boundary */
627 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
628 BUS_SPACE_MAXADDR, /* highaddr */
629 NULL, NULL, /* filter, filterarg */
630 framessz, /* maxsize */
631 1, /* nsegments */
632 framessz, /* maxsegsize */
633 0, /* flags */
634 NULL, NULL, /* lockfunc, lockarg */
635 &sc->mfi_frames_dmat)) {
636 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
637 return (ENOMEM);
638 }
639 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
640 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
641 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
642 return (ENOMEM);
643 }
644 bzero(sc->mfi_frames, framessz);
645 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
646 sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
647 /*
648 * Allocate DMA memory for the frame sense data. Keep them in the
649 * lower 4GB for efficiency
650 */
651 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
652 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
653 4, 0, /* algnmnt, boundary */
654 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
655 BUS_SPACE_MAXADDR, /* highaddr */
656 NULL, NULL, /* filter, filterarg */
657 sensesz, /* maxsize */
658 1, /* nsegments */
659 sensesz, /* maxsegsize */
660 0, /* flags */
661 NULL, NULL, /* lockfunc, lockarg */
662 &sc->mfi_sense_dmat)) {
663 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
664 return (ENOMEM);
665 }
666 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
667 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
668 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
669 return (ENOMEM);
670 }
671 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
672 sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
673 if ((error = mfi_alloc_commands(sc)) != 0)
674 return (error);
675
676 /* Before moving the FW to operational state, check whether
 677	 * host memory is required by the FW or not
678 */
679
680 /* ThunderBolt MFI_IOC2 INIT */
681 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
682 sc->mfi_disable_intr(sc);
683 if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
684 device_printf(sc->mfi_dev,
685 "TB Init has failed with error %d\n",error);
686 return error;
687 }
688
689 if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
690 return error;
691 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
692 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
693 &sc->mfi_intr)) {
694 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
695 return (EINVAL);
696 }
697 sc->mfi_enable_intr(sc);
+ 699		sc->map_id = 0;
700 } else {
701 if ((error = mfi_comms_init(sc)) != 0)
702 return (error);
703
704 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
705 INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
706 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
707 return (EINVAL);
708 }
709 sc->mfi_enable_intr(sc);
710 }
711 if ((error = mfi_get_controller_info(sc)) != 0)
712 return (error);
713 sc->disableOnlineCtrlReset = 0;
714
715 /* Register a config hook to probe the bus for arrays */
716 sc->mfi_ich.ich_func = mfi_startup;
717 sc->mfi_ich.ich_arg = sc;
718 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
719 device_printf(sc->mfi_dev, "Cannot establish configuration "
720 "hook\n");
721 return (EINVAL);
722 }
 723	if ((error = mfi_aen_setup(sc, 0)) != 0)
 724		return (error);
727
728 /*
729 * Register a shutdown handler.
730 */
731 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
732 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
733 device_printf(sc->mfi_dev, "Warning: shutdown event "
734 "registration failed\n");
735 }
736
737 /*
738 * Create the control device for doing management
739 */
740 unit = device_get_unit(sc->mfi_dev);
741 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
742 0640, "mfi%d", unit);
743 if (unit == 0)
744 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
745 if (sc->mfi_cdev != NULL)
746 sc->mfi_cdev->si_drv1 = sc;
747 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
748 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
749 OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
750 &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
751 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
752 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
753 OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
754 &sc->mfi_keep_deleted_volumes, 0,
755 "Don't detach the mfid device for a busy volume that is deleted");
756
757 device_add_child(sc->mfi_dev, "mfip", -1);
758 bus_generic_attach(sc->mfi_dev);
759
760 /* Start the timeout watchdog */
761 callout_init(&sc->mfi_watchdog_callout, CALLOUT_MPSAFE);
762 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
763 mfi_timeout, sc);
764
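	/*
	 * Finally, ThunderBolt firmware gets an initial device-map sync
	 * before the attach is declared complete.
	 */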
+ 763	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
+ 764		mfi_tbolt_sync_map_info(sc);
+ 765	}
766
767 return (0);
768}
769
770static int
771mfi_alloc_commands(struct mfi_softc *sc)
772{
773 struct mfi_command *cm;
774 int i, ncmds;
775
776 /*
777 * XXX Should we allocate all the commands up front, or allocate on
778 * demand later like 'aac' does?
779 */
780 ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
781 if (bootverbose)
 782 		device_printf(sc->mfi_dev, "Max fw cmds = %d, sizing driver "
783 "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
784
785 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
786 M_WAITOK | M_ZERO);
787
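	/*
	 * Point each command at its slice of the preallocated frame and
	 * sense DMA regions, then create a per-command buffer dmamap.
	 */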
788 for (i = 0; i < ncmds; i++) {
789 cm = &sc->mfi_commands[i];
790 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
791 sc->mfi_cmd_size * i);
792 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
793 sc->mfi_cmd_size * i;
794 cm->cm_frame->header.context = i;
795 cm->cm_sense = &sc->mfi_sense[i];
 796 		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
797 cm->cm_sc = sc;
798 cm->cm_index = i;
 799 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
 800 		    &cm->cm_dmamap) != 0)
 801 			break;
 802 		mtx_lock(&sc->mfi_io_lock);
 803 		mfi_release_command(cm);
 804 		mtx_unlock(&sc->mfi_io_lock);
 805 		sc->mfi_total_cmds++;
808 }
809
810 return (0);
811}
812
813void
814mfi_release_command(struct mfi_command *cm)
815{
816 struct mfi_frame_header *hdr;
817 uint32_t *hdr_data;
818
819 mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
820
821 /*
822 * Zero out the important fields of the frame, but make sure the
823 * context field is preserved. For efficiency, handle the fields
824 * as 32 bit words. Clear out the first S/G entry too for safety.
825 */
826 hdr = &cm->cm_frame->header;
827 if (cm->cm_data != NULL && hdr->sg_count) {
828 cm->cm_sg->sg32[0].len = 0;
829 cm->cm_sg->sg32[0].addr = 0;
830 }
831
832 hdr_data = (uint32_t *)cm->cm_frame;
833 hdr_data[0] = 0; /* cmd, sense_len, cmd_status, scsi_status */
834 hdr_data[1] = 0; /* target_id, lun_id, cdb_len, sg_count */
835 hdr_data[4] = 0; /* flags, timeout */
836 hdr_data[5] = 0; /* data_len */
837
838 cm->cm_extra_frames = 0;
839 cm->cm_flags = 0;
840 cm->cm_complete = NULL;
841 cm->cm_private = NULL;
842 cm->cm_data = NULL;
 843 	cm->cm_sg = NULL;
844 cm->cm_total_frame_size = 0;
845 cm->retry_for_fw_reset = 0;
846
847 mfi_enqueue_free(cm);
848}
849
850int
851mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
852 uint32_t opcode, void **bufp, size_t bufsize)
853{
854 struct mfi_command *cm;
855 struct mfi_dcmd_frame *dcmd;
856 void *buf = NULL;
857 uint32_t context = 0;
858
859 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
860
861 cm = mfi_dequeue_free(sc);
862 if (cm == NULL)
863 return (EBUSY);
864
865 /* Zero out the MFI frame */
866 context = cm->cm_frame->header.context;
867 bzero(cm->cm_frame, sizeof(union mfi_frame));
868 cm->cm_frame->header.context = context;
869
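	/* Allocate a data buffer on the caller's behalf if none was passed. */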
870 if ((bufsize > 0) && (bufp != NULL)) {
871 if (*bufp == NULL) {
872 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
873 if (buf == NULL) {
874 mfi_release_command(cm);
875 return (ENOMEM);
876 }
877 *bufp = buf;
878 } else {
879 buf = *bufp;
880 }
881 }
882
883 dcmd = &cm->cm_frame->dcmd;
884 bzero(dcmd->mbox, MFI_MBOX_SIZE);
885 dcmd->header.cmd = MFI_CMD_DCMD;
886 dcmd->header.timeout = 0;
887 dcmd->header.flags = 0;
888 dcmd->header.data_len = bufsize;
889 dcmd->header.scsi_status = 0;
890 dcmd->opcode = opcode;
891 cm->cm_sg = &dcmd->sgl;
892 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
893 cm->cm_flags = 0;
894 cm->cm_data = buf;
895 cm->cm_private = buf;
896 cm->cm_len = bufsize;
897
898 *cmp = cm;
899 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
900 *bufp = buf;
901 return (0);
902}
903
904static int
905mfi_comms_init(struct mfi_softc *sc)
906{
907 struct mfi_command *cm;
908 struct mfi_init_frame *init;
909 struct mfi_init_qinfo *qinfo;
910 int error;
911 uint32_t context = 0;
912
913 mtx_lock(&sc->mfi_io_lock);
 914 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
 		mtx_unlock(&sc->mfi_io_lock);
 915 		return (EBUSY);
 	}
916
917 /* Zero out the MFI frame */
918 context = cm->cm_frame->header.context;
919 bzero(cm->cm_frame, sizeof(union mfi_frame));
920 cm->cm_frame->header.context = context;
921
922 /*
923 * Abuse the SG list area of the frame to hold the init_qinfo
 924 	 * object.
925 */
926 init = &cm->cm_frame->init;
927 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
928
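	/*
	 * Describe to the firmware where the reply queue and its
	 * producer/consumer indices live in host memory.
	 */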
929 bzero(qinfo, sizeof(struct mfi_init_qinfo));
930 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
931 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
932 offsetof(struct mfi_hwcomms, hw_reply_q);
933 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
934 offsetof(struct mfi_hwcomms, hw_pi);
935 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
936 offsetof(struct mfi_hwcomms, hw_ci);
937
938 init->header.cmd = MFI_CMD_INIT;
939 init->header.data_len = sizeof(struct mfi_init_qinfo);
940 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
941 cm->cm_data = NULL;
942 cm->cm_flags = MFI_CMD_POLLED;
943
944 if ((error = mfi_mapcmd(sc, cm)) != 0) {
945 device_printf(sc->mfi_dev, "failed to send init command\n");
946 mtx_unlock(&sc->mfi_io_lock);
947 return (error);
948 }
949 mfi_release_command(cm);
950 mtx_unlock(&sc->mfi_io_lock);
951
952 return (0);
953}
954
955static int
956mfi_get_controller_info(struct mfi_softc *sc)
957{
958 struct mfi_command *cm = NULL;
959 struct mfi_ctrl_info *ci = NULL;
960 uint32_t max_sectors_1, max_sectors_2;
961 int error;
962
963 mtx_lock(&sc->mfi_io_lock);
964 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
965 (void **)&ci, sizeof(*ci));
966 if (error)
967 goto out;
968 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
969
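	/*
	 * If the DCMD fails, fall back to a conservative transfer size
	 * derived from the scatter/gather limit.
	 */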
970 if ((error = mfi_mapcmd(sc, cm)) != 0) {
971 device_printf(sc->mfi_dev, "Failed to get controller info\n");
972 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
973 MFI_SECTOR_LEN;
974 error = 0;
975 goto out;
976 }
977
978 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
979 BUS_DMASYNC_POSTREAD);
980 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
981
982 max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
983 max_sectors_2 = ci->max_request_size;
984 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
985 sc->disableOnlineCtrlReset =
986 ci->properties.OnOffProperties.disableOnlineCtrlReset;
987
988out:
989 if (ci)
990 free(ci, M_MFIBUF);
991 if (cm)
992 mfi_release_command(cm);
993 mtx_unlock(&sc->mfi_io_lock);
994 return (error);
995}
996
997static int
998mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
999{
1000 struct mfi_command *cm = NULL;
1001 int error;
1002
1003 mtx_lock(&sc->mfi_io_lock);
1004 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1005 (void **)log_state, sizeof(**log_state));
1006 if (error)
1007 goto out;
1008 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1009
1010 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1011 device_printf(sc->mfi_dev, "Failed to get log state\n");
1012 goto out;
1013 }
1014
1015 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1016 BUS_DMASYNC_POSTREAD);
1017 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1018
1019out:
1020 if (cm)
1021 mfi_release_command(cm);
1022 mtx_unlock(&sc->mfi_io_lock);
1023
1024 return (error);
1025}
1026
1027int
1028mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1029{
1030 struct mfi_evt_log_state *log_state = NULL;
1031 union mfi_evt class_locale;
1032 int error = 0;
1033 uint32_t seq;
1034
1035 class_locale.members.reserved = 0;
1036 class_locale.members.locale = mfi_event_locale;
1037 class_locale.members.evt_class = mfi_event_class;
1038
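	/*
	 * With no starting sequence number, resume from where the
	 * firmware's event log left off.
	 */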
1039 if (seq_start == 0) {
1040 		error = mfi_get_log_state(sc, &log_state);
1041 		if (error) {
1042 			if (log_state)
1043 				free(log_state, M_MFIBUF);
1044 			return (error);
1045 		}
1046 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1047
1048 /*
1049 * Walk through any events that fired since the last
1050 * shutdown.
1051 */
1052 mfi_parse_entries(sc, log_state->shutdown_seq_num,
1053 log_state->newest_seq_num);
1054 seq = log_state->newest_seq_num;
1055 } else
1056 seq = seq_start;
1057 mfi_aen_register(sc, seq, class_locale.word);
1058 free(log_state, M_MFIBUF);
1059
1060 	return (0);
1061}
1062
1063int
1064mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1065{
1066
1067 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1068 cm->cm_complete = NULL;
1069
1071 /*
1072 * MegaCli can issue a DCMD of 0. In this case do nothing
1073 * and return 0 to it as status
1074 */
1075 if (cm->cm_frame->dcmd.opcode == 0) {
1076 cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1077 cm->cm_error = 0;
1078 return (cm->cm_error);
1079 }
1080 mfi_enqueue_ready(cm);
1081 mfi_startio(sc);
1082 if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1083 msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1084 return (cm->cm_error);
1085}
1086
1087void
1088mfi_free(struct mfi_softc *sc)
1089{
1090 struct mfi_command *cm;
1091 int i;
1092
1093 callout_drain(&sc->mfi_watchdog_callout);
1094
1095 if (sc->mfi_cdev != NULL)
1096 destroy_dev(sc->mfi_cdev);
1097
1098 if (sc->mfi_total_cmds != 0) {
1099 for (i = 0; i < sc->mfi_total_cmds; i++) {
1100 cm = &sc->mfi_commands[i];
1101 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1102 }
1103 free(sc->mfi_commands, M_MFIBUF);
1104 }
1105
1106 if (sc->mfi_intr)
1107 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1108 if (sc->mfi_irq != NULL)
1109 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1110 sc->mfi_irq);
1111
1112 if (sc->mfi_sense_busaddr != 0)
1113 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1114 if (sc->mfi_sense != NULL)
1115 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1116 sc->mfi_sense_dmamap);
1117 if (sc->mfi_sense_dmat != NULL)
1118 bus_dma_tag_destroy(sc->mfi_sense_dmat);
1119
1120 if (sc->mfi_frames_busaddr != 0)
1121 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1122 if (sc->mfi_frames != NULL)
1123 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1124 sc->mfi_frames_dmamap);
1125 if (sc->mfi_frames_dmat != NULL)
1126 bus_dma_tag_destroy(sc->mfi_frames_dmat);
1127
1128 if (sc->mfi_comms_busaddr != 0)
1129 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1130 if (sc->mfi_comms != NULL)
1131 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1132 sc->mfi_comms_dmamap);
1133 if (sc->mfi_comms_dmat != NULL)
1134 bus_dma_tag_destroy(sc->mfi_comms_dmat);
1135
1136 /* ThunderBolt contiguous memory free here */
1137 if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1138 if (sc->mfi_tb_busaddr != 0)
1139 bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1140 if (sc->request_message_pool != NULL)
1141 bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1142 sc->mfi_tb_dmamap);
1143 if (sc->mfi_tb_dmat != NULL)
1144 bus_dma_tag_destroy(sc->mfi_tb_dmat);
1145
1146 /* Version buffer memory free */
1147 /* Start LSIP200113393 */
1148 if (sc->verbuf_h_busaddr != 0)
1149 bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1150 if (sc->verbuf != NULL)
1151 bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1152 sc->verbuf_h_dmamap);
1153 if (sc->verbuf_h_dmat != NULL)
1154 bus_dma_tag_destroy(sc->verbuf_h_dmat);
1155
1156 /* End LSIP200113393 */
1157 /* ThunderBolt INIT packet memory Free */
1158 if (sc->mfi_tb_init_busaddr != 0)
1159 bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1160 if (sc->mfi_tb_init != NULL)
1161 bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1162 sc->mfi_tb_init_dmamap);
1163 if (sc->mfi_tb_init_dmat != NULL)
1164 bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1165
1166 /* ThunderBolt IOC Init Desc memory free here */
1167 if (sc->mfi_tb_ioc_init_busaddr != 0)
1168 bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1169 sc->mfi_tb_ioc_init_dmamap);
1170 if (sc->mfi_tb_ioc_init_desc != NULL)
1171 bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1172 sc->mfi_tb_ioc_init_desc,
1173 sc->mfi_tb_ioc_init_dmamap);
1174 if (sc->mfi_tb_ioc_init_dmat != NULL)
1175 bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1176 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1177 			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1178 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1179 					free(sc->mfi_cmd_pool_tbolt[i],
1180 					    M_MFIBUF);
1181 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1182 				}
1183 			}
1184 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1185 			sc->mfi_cmd_pool_tbolt = NULL;
1186 		}
1189 if (sc->request_desc_pool != NULL) {
1190 free(sc->request_desc_pool, M_MFIBUF);
1191 sc->request_desc_pool = NULL;
1192 }
1193 }
1194 if (sc->mfi_buffer_dmat != NULL)
1195 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1196 if (sc->mfi_parent_dmat != NULL)
1197 bus_dma_tag_destroy(sc->mfi_parent_dmat);
1198
1199 if (mtx_initialized(&sc->mfi_io_lock)) {
1200 mtx_destroy(&sc->mfi_io_lock);
1201 sx_destroy(&sc->mfi_config_lock);
1202 }
1203
1204 return;
1205}
1206
1207static void
1208mfi_startup(void *arg)
1209{
1210 struct mfi_softc *sc;
1211
1212 sc = (struct mfi_softc *)arg;
1213
1214 config_intrhook_disestablish(&sc->mfi_ich);
1215
1216 sc->mfi_enable_intr(sc);
1217 sx_xlock(&sc->mfi_config_lock);
1218 mtx_lock(&sc->mfi_io_lock);
1219 mfi_ldprobe(sc);
1220 if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1221 mfi_syspdprobe(sc);
1222 mtx_unlock(&sc->mfi_io_lock);
1223 sx_xunlock(&sc->mfi_config_lock);
1224}
1225
1226static void
1227mfi_intr(void *arg)
1228{
1229 struct mfi_softc *sc;
1230 struct mfi_command *cm;
1231 uint32_t pi, ci, context;
1232
1233 sc = (struct mfi_softc *)arg;
1234
1235 if (sc->mfi_check_clear_intr(sc))
1236 return;
1237
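/*
 * Walk the reply queue; each entry is the context (command index) of a
 * command completed by the firmware.
 */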
1238restart:
1239 pi = sc->mfi_comms->hw_pi;
1240 ci = sc->mfi_comms->hw_ci;
1241 mtx_lock(&sc->mfi_io_lock);
1242 while (ci != pi) {
1243 context = sc->mfi_comms->hw_reply_q[ci];
1244 if (context < sc->mfi_max_fw_cmds) {
1245 cm = &sc->mfi_commands[context];
1246 mfi_remove_busy(cm);
1247 cm->cm_error = 0;
1248 mfi_complete(sc, cm);
1249 }
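		/* The reply queue has mfi_max_fw_cmds + 1 slots; wrap by hand. */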
1250 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1251 ci = 0;
1252 }
1253 }
1254
1255 sc->mfi_comms->hw_ci = ci;
1256
 1257 	/* Give deferred I/O a chance to run */
1258 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1259 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1260 mfi_startio(sc);
1261 mtx_unlock(&sc->mfi_io_lock);
1262
1263 /*
1264 * Dummy read to flush the bus; this ensures that the indexes are up
 1265 	 * to date. Restart processing if more commands have come in.
1266 */
1267 (void)sc->mfi_read_fw_status(sc);
1268 if (pi != sc->mfi_comms->hw_pi)
1269 goto restart;
1270
1271 return;
1272}
1273
1274int
1275mfi_shutdown(struct mfi_softc *sc)
1276{
1277 struct mfi_dcmd_frame *dcmd;
1278 struct mfi_command *cm;
1279 int error;
1280
1281 mtx_lock(&sc->mfi_io_lock);
1282 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1283 if (error) {
1284 mtx_unlock(&sc->mfi_io_lock);
1285 return (error);
1286 }
1287
1288 if (sc->mfi_aen_cm != NULL)
1289 mfi_abort(sc, sc->mfi_aen_cm);
1290
1291 if (sc->mfi_map_sync_cm != NULL)
1292 mfi_abort(sc, sc->mfi_map_sync_cm);
1293
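	/* Issue the shutdown DCMD polled rather than waiting on an interrupt. */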
1294 dcmd = &cm->cm_frame->dcmd;
1295 dcmd->header.flags = MFI_FRAME_DIR_NONE;
1296 cm->cm_flags = MFI_CMD_POLLED;
1297 cm->cm_data = NULL;
1298
1299 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1300 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1301 }
1302
1303 mfi_release_command(cm);
1304 mtx_unlock(&sc->mfi_io_lock);
1305 return (error);
1306}
1307
1308static void
1309mfi_syspdprobe(struct mfi_softc *sc)
1310{
1311 struct mfi_frame_header *hdr;
1312 struct mfi_command *cm = NULL;
1313 struct mfi_pd_list *pdlist = NULL;
1314 struct mfi_system_pd *syspd, *tmp;
1315 int error, i, found;
1316
1317 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1318 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1319 /* Add SYSTEM PD's */
1320 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1321 (void **)&pdlist, sizeof(*pdlist));
 1322 	if (error) {
1323 device_printf(sc->mfi_dev,
1324 "Error while forming SYSTEM PD list\n");
1325 goto out;
1326 }
1327
1328 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1329 cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1330 cm->cm_frame->dcmd.mbox[1] = 0;
1331 if (mfi_mapcmd(sc, cm) != 0) {
1332 device_printf(sc->mfi_dev,
1333 "Failed to get syspd device listing\n");
1334 goto out;
1335 }
 1336 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1337 BUS_DMASYNC_POSTREAD);
1338 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1339 hdr = &cm->cm_frame->header;
1340 if (hdr->cmd_status != MFI_STAT_OK) {
1341 device_printf(sc->mfi_dev,
1342 "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1343 goto out;
1344 }
1345 /* Get each PD and add it to the system */
1346 for (i = 0; i < pdlist->count; i++) {
1347 if (pdlist->addr[i].device_id ==
1348 pdlist->addr[i].encl_device_id)
1349 continue;
1350 found = 0;
1351 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1352 if (syspd->pd_id == pdlist->addr[i].device_id)
1353 found = 1;
1354 }
1355 if (found == 0)
1356 mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1357 }
1358 /* Delete SYSPD's whose state has been changed */
1359 TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1360 found = 0;
1361 for (i = 0; i < pdlist->count; i++) {
1362 if (syspd->pd_id == pdlist->addr[i].device_id)
1363 found = 1;
1364 }
1365 if (found == 0) {
 1366 			device_printf(sc->mfi_dev, "deleting SYSPD %d\n",
 			    syspd->pd_id);
1367 mtx_unlock(&sc->mfi_io_lock);
1368 mtx_lock(&Giant);
1369 device_delete_child(sc->mfi_dev, syspd->pd_dev);
1370 mtx_unlock(&Giant);
1371 mtx_lock(&sc->mfi_io_lock);
1372 }
1373 }
1374out:
1375 if (pdlist)
1376 free(pdlist, M_MFIBUF);
1377 if (cm)
1378 mfi_release_command(cm);
1379
1380 return;
1381}
1382
1383static void
1384mfi_ldprobe(struct mfi_softc *sc)
1385{
1386 struct mfi_frame_header *hdr;
1387 struct mfi_command *cm = NULL;
1388 struct mfi_ld_list *list = NULL;
1389 struct mfi_disk *ld;
1390 int error, i;
1391
1392 sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1393 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1394
1395 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1396 (void **)&list, sizeof(*list));
1397 if (error)
1398 goto out;
1399
1400 cm->cm_flags = MFI_CMD_DATAIN;
1401 if (mfi_wait_command(sc, cm) != 0) {
1402 device_printf(sc->mfi_dev, "Failed to get device listing\n");
1403 goto out;
1404 }
1405
1406 hdr = &cm->cm_frame->header;
1407 if (hdr->cmd_status != MFI_STAT_OK) {
1408 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1409 hdr->cmd_status);
1410 goto out;
1411 }
1412
1413 for (i = 0; i < list->ld_count; i++) {
1414 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1415 if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1416 goto skip_add;
1417 }
1418 mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1419 skip_add:;
1420 }
1421out:
1422 if (list)
1423 free(list, M_MFIBUF);
1424 if (cm)
1425 mfi_release_command(cm);
1426
1427 return;
1428}
1429
1430/*
1431 * The timestamp is the number of seconds since 00:00 Jan 1, 2000. If
 1432  * bits 24-31 are all set, then it is the number of seconds since
1433 * boot.
1434 */
1435static const char *
1436format_timestamp(uint32_t timestamp)
1437{
1438 static char buffer[32];
1439
1440 if ((timestamp & 0xff000000) == 0xff000000)
1441 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1442 0x00ffffff);
1443 else
1444 snprintf(buffer, sizeof(buffer), "%us", timestamp);
1445 return (buffer);
1446}
1447
1448static const char *
1449format_class(int8_t class)
1450{
1451 static char buffer[6];
1452
1453 switch (class) {
1454 case MFI_EVT_CLASS_DEBUG:
1455 return ("debug");
1456 case MFI_EVT_CLASS_PROGRESS:
1457 return ("progress");
1458 case MFI_EVT_CLASS_INFO:
1459 return ("info");
1460 case MFI_EVT_CLASS_WARNING:
1461 return ("WARN");
1462 case MFI_EVT_CLASS_CRITICAL:
1463 return ("CRIT");
1464 case MFI_EVT_CLASS_FATAL:
1465 return ("FATAL");
1466 case MFI_EVT_CLASS_DEAD:
1467 return ("DEAD");
1468 default:
1469 snprintf(buffer, sizeof(buffer), "%d", class);
1470 return (buffer);
1471 }
1472}
1473
1474static void
1475mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1476{
1477 struct mfi_system_pd *syspd = NULL;
1478
1479 device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1480 format_timestamp(detail->time), detail->evt_class.members.locale,
1481 format_class(detail->evt_class.members.evt_class),
1482 detail->description);
1483
1484 /* Don't act on old AEN's or while shutting down */
1485 if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1486 return;
1487
1488 switch (detail->arg_type) {
1489 case MR_EVT_ARGS_NONE:
1490 if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1491 device_printf(sc->mfi_dev, "HostBus scan raised\n");
1492 if (mfi_detect_jbod_change) {
1493 /*
 1494 				 * Probe for new SYSPDs and delete
 1495 				 * invalid ones.
1496 */
1497 sx_xlock(&sc->mfi_config_lock);
1498 mtx_lock(&sc->mfi_io_lock);
1499 mfi_syspdprobe(sc);
1500 mtx_unlock(&sc->mfi_io_lock);
1501 sx_xunlock(&sc->mfi_config_lock);
1502 }
1503 }
1504 break;
1505 case MR_EVT_ARGS_LD_STATE:
 1506 		/*
 1507 		 * At load time the driver replays all events logged since the
 1508 		 * last shutdown; don't act on those stale events here.
 1509 		 */
 1510 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1511 /* Remove the LD */
1512 struct mfi_disk *ld;
1513 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1514 if (ld->ld_id ==
1515 detail->args.ld_state.ld.target_id)
1516 break;
1517 }
 1518 			/*
 1519 			 * Disabled to avoid panics when an SSCD is removed:
 1520 			 * KASSERT(ld != NULL, ("volume disappeared"));
 1521 			 */
1522 if (ld != NULL) {
1523 mtx_lock(&Giant);
1524 device_delete_child(sc->mfi_dev, ld->ld_dev);
1525 mtx_unlock(&Giant);
1526 }
1527 }
1528 break;
1529 case MR_EVT_ARGS_PD:
1530 if (detail->code == MR_EVT_PD_REMOVED) {
1531 if (mfi_detect_jbod_change) {
1532 /*
1533 * If the removed device is a SYSPD then
1534 * delete it
1535 */
1536 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1537 pd_link) {
1538 if (syspd->pd_id ==
1539 detail->args.pd.device_id) {
1540 mtx_lock(&Giant);
1541 device_delete_child(
1542 sc->mfi_dev,
1543 syspd->pd_dev);
1544 mtx_unlock(&Giant);
1545 break;
1546 }
1547 }
1548 }
1549 }
1550 if (detail->code == MR_EVT_PD_INSERTED) {
1551 if (mfi_detect_jbod_change) {
1552 /* Probe for new SYSPD's */
1553 sx_xlock(&sc->mfi_config_lock);
1554 mtx_lock(&sc->mfi_io_lock);
1555 mfi_syspdprobe(sc);
1556 mtx_unlock(&sc->mfi_io_lock);
1557 sx_xunlock(&sc->mfi_config_lock);
1558 }
1559 }
1560 break;
1561 }
1562}
1563
1564static void
1565mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1566{
1567 struct mfi_evt_queue_elm *elm;
1568
1569 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1570 elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1571 if (elm == NULL)
1572 return;
1573 memcpy(&elm->detail, detail, sizeof(*detail));
1574 TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1575 taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1576}
1577
1578static void
1579mfi_handle_evt(void *context, int pending)
1580{
1581 TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1582 struct mfi_softc *sc;
1583 struct mfi_evt_queue_elm *elm;
1584
1585 sc = context;
1586 TAILQ_INIT(&queue);
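	/* Drain the event queue to a local list under the lock, then decode. */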
1587 mtx_lock(&sc->mfi_io_lock);
1588 TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1589 mtx_unlock(&sc->mfi_io_lock);
1590 while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1591 TAILQ_REMOVE(&queue, elm, link);
1592 mfi_decode_evt(sc, &elm->detail);
1593 free(elm, M_MFIBUF);
1594 }
1595}
1596
1597static int
1598mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1599{
1600 struct mfi_command *cm;
1601 struct mfi_dcmd_frame *dcmd;
1602 union mfi_evt current_aen, prior_aen;
1603 struct mfi_evt_detail *ed = NULL;
1604 int error = 0;
1605
1606 current_aen.word = locale;
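	/*
	 * If an AEN command is already outstanding, keep it when it already
	 * covers the requested class and locale; otherwise widen the filter
	 * and re-register below.
	 */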
1607 if (sc->mfi_aen_cm != NULL) {
1608 prior_aen.word =
1609 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1610 if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1611 !((prior_aen.members.locale & current_aen.members.locale)
1612 ^current_aen.members.locale)) {
1613 return (0);
1614 } else {
1615 prior_aen.members.locale |= current_aen.members.locale;
1616 if (prior_aen.members.evt_class
1617 < current_aen.members.evt_class)
1618 current_aen.members.evt_class =
1619 prior_aen.members.evt_class;
1620 mtx_lock(&sc->mfi_io_lock);
1621 mfi_abort(sc, sc->mfi_aen_cm);
1622 mtx_unlock(&sc->mfi_io_lock);
1623 }
1624 }
1625
1626 mtx_lock(&sc->mfi_io_lock);
1627 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1628 (void **)&ed, sizeof(*ed));
1629 mtx_unlock(&sc->mfi_io_lock);
1630 if (error) {
1631 goto out;
1632 }
1633
1634 dcmd = &cm->cm_frame->dcmd;
1635 ((uint32_t *)&dcmd->mbox)[0] = seq;
1636 ((uint32_t *)&dcmd->mbox)[1] = locale;
1637 cm->cm_flags = MFI_CMD_DATAIN;
1638 cm->cm_complete = mfi_aen_complete;
1639
1640 sc->last_seq_num = seq;
1641 sc->mfi_aen_cm = cm;
1642
1643 mtx_lock(&sc->mfi_io_lock);
1644 mfi_enqueue_ready(cm);
1645 mfi_startio(sc);
1646 mtx_unlock(&sc->mfi_io_lock);
1647
1648out:
1649 return (error);
1650}
1651
1652static void
1653mfi_aen_complete(struct mfi_command *cm)
1654{
1655 struct mfi_frame_header *hdr;
1656 struct mfi_softc *sc;
1657 struct mfi_evt_detail *detail;
1658 struct mfi_aen *mfi_aen_entry, *tmp;
1659 int seq = 0, aborted = 0;
1660
1661 sc = cm->cm_sc;
1662 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1663
1664 hdr = &cm->cm_frame->header;
1665
1666 if (sc->mfi_aen_cm == NULL)
1667 return;
1668
1669 if (sc->cm_aen_abort ||
1670 hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1671 sc->cm_aen_abort = 0;
1670 aborted = 1;
1671 } else {
1672 sc->mfi_aen_triggered = 1;
1673 if (sc->mfi_poll_waiting) {
1674 sc->mfi_poll_waiting = 0;
1675 selwakeup(&sc->mfi_select);
1676 }
1677 detail = cm->cm_data;
1678 mfi_queue_evt(sc, detail);
1679 seq = detail->seq + 1;
1680 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1681 tmp) {
1682 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1683 aen_link);
1684 PROC_LOCK(mfi_aen_entry->p);
1685 kern_psignal(mfi_aen_entry->p, SIGIO);
1686 PROC_UNLOCK(mfi_aen_entry->p);
1687 free(mfi_aen_entry, M_MFIBUF);
1688 }
1689 }
1690
1691 free(cm->cm_data, M_MFIBUF);
1692 sc->mfi_aen_cm = NULL;
1693 wakeup(&sc->mfi_aen_cm);
1694 mfi_release_command(cm);
1695
1696 /* set it up again so the driver can catch more events */
1697 if (!aborted) {
1698 mtx_unlock(&sc->mfi_io_lock);
1699 mfi_aen_setup(sc, seq);
1700 mtx_lock(&sc->mfi_io_lock);
1701 }
1702}
1703
1704#define MAX_EVENTS 15
1705
1706static int
1707mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1708{
1709 struct mfi_command *cm;
1710 struct mfi_dcmd_frame *dcmd;
1711 struct mfi_evt_list *el;
1712 union mfi_evt class_locale;
1713 int error, i, seq, size;
1714
1715 class_locale.members.reserved = 0;
1716 class_locale.members.locale = mfi_event_locale;
1717 class_locale.members.evt_class = mfi_event_class;
1718
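	/* struct mfi_evt_list already holds one mfi_evt_detail, hence the -1. */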
1719 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1720 * (MAX_EVENTS - 1);
1721 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1722 if (el == NULL)
1723 return (ENOMEM);
1724
1725 for (seq = start_seq;;) {
1726 mtx_lock(&sc->mfi_io_lock);
1727 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1728 free(el, M_MFIBUF);
1729 mtx_unlock(&sc->mfi_io_lock);
1730 return (EBUSY);
1731 }
1732 mtx_unlock(&sc->mfi_io_lock);
1733
1734 dcmd = &cm->cm_frame->dcmd;
1735 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1736 dcmd->header.cmd = MFI_CMD_DCMD;
1737 dcmd->header.timeout = 0;
1738 dcmd->header.data_len = size;
1739 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1740 ((uint32_t *)&dcmd->mbox)[0] = seq;
1741 ((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1742 cm->cm_sg = &dcmd->sgl;
1743 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1744 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1745 cm->cm_data = el;
1746 cm->cm_len = size;
1747
1748 mtx_lock(&sc->mfi_io_lock);
1749 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1750 device_printf(sc->mfi_dev,
1751 "Failed to get controller entries\n");
1752 mfi_release_command(cm);
1753 mtx_unlock(&sc->mfi_io_lock);
1754 break;
1755 }
1756
1757 mtx_unlock(&sc->mfi_io_lock);
1758 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1759 BUS_DMASYNC_POSTREAD);
1760 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1761
1762 if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1763 mtx_lock(&sc->mfi_io_lock);
1764 mfi_release_command(cm);
1765 mtx_unlock(&sc->mfi_io_lock);
1766 break;
1767 }
1768 if (dcmd->header.cmd_status != MFI_STAT_OK) {
1769 device_printf(sc->mfi_dev,
1770 "Error %d fetching controller entries\n",
1771 dcmd->header.cmd_status);
1772 mtx_lock(&sc->mfi_io_lock);
1773 mfi_release_command(cm);
1774 mtx_unlock(&sc->mfi_io_lock);
1775 break;
1776 }
1777 mtx_lock(&sc->mfi_io_lock);
1778 mfi_release_command(cm);
1779 mtx_unlock(&sc->mfi_io_lock);
1780
1781 for (i = 0; i < el->count; i++) {
1782 /*
1783 * If this event is newer than 'stop_seq' then
1784 * break out of the loop. Note that the log
1785 * is a circular buffer so we have to handle
1786 * the case that our stop point is earlier in
1787 * the buffer than our start point.
1788 */
1789 if (el->event[i].seq >= stop_seq) {
1790 if (start_seq <= stop_seq)
1791 break;
1792 else if (el->event[i].seq < start_seq)
1793 break;
1794 }
1795 mtx_lock(&sc->mfi_io_lock);
1796 mfi_queue_evt(sc, &el->event[i]);
1797 mtx_unlock(&sc->mfi_io_lock);
1798 }
1799 seq = el->event[el->count - 1].seq + 1;
1800 }
1801
1802 free(el, M_MFIBUF);
1803 return (0);
1804}
1805
1806static int
1807mfi_add_ld(struct mfi_softc *sc, int id)
1808{
1809 struct mfi_command *cm;
1810 struct mfi_dcmd_frame *dcmd = NULL;
1811 struct mfi_ld_info *ld_info = NULL;
1812 int error;
1813
1814 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1815
1816 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1817 (void **)&ld_info, sizeof(*ld_info));
1818 if (error) {
1819 device_printf(sc->mfi_dev,
1820 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1821 if (ld_info)
1822 free(ld_info, M_MFIBUF);
1823 return (error);
1824 }
1825 cm->cm_flags = MFI_CMD_DATAIN;
1826 dcmd = &cm->cm_frame->dcmd;
1827 dcmd->mbox[0] = id;
1828 if (mfi_wait_command(sc, cm) != 0) {
1829 device_printf(sc->mfi_dev,
1830 "Failed to get logical drive: %d\n", id);
1831 free(ld_info, M_MFIBUF);
1832 return (0);
1833 }
1834 if (ld_info->ld_config.params.isSSCD != 1)
1835 mfi_add_ld_complete(cm);
1836 else {
1837 mfi_release_command(cm);
 1838 		if (ld_info)	/* for SSCD drives, ld_info is freed here */
 1839 			free(ld_info, M_MFIBUF);
1840 }
1841 return (0);
1842}
1843
1844static void
1845mfi_add_ld_complete(struct mfi_command *cm)
1846{
1847 struct mfi_frame_header *hdr;
1848 struct mfi_ld_info *ld_info;
1849 struct mfi_softc *sc;
1850 device_t child;
1851
1852 sc = cm->cm_sc;
1853 hdr = &cm->cm_frame->header;
1854 ld_info = cm->cm_private;
1855
1856 if (hdr->cmd_status != MFI_STAT_OK) {
1857 free(ld_info, M_MFIBUF);
1858 mfi_release_command(cm);
1859 return;
1860 }
1861 mfi_release_command(cm);
1862
1863 mtx_unlock(&sc->mfi_io_lock);
1864 mtx_lock(&Giant);
1865 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1866 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1867 free(ld_info, M_MFIBUF);
1868 mtx_unlock(&Giant);
1869 mtx_lock(&sc->mfi_io_lock);
1870 return;
1871 }
1872
1873 device_set_ivars(child, ld_info);
1874 device_set_desc(child, "MFI Logical Disk");
1875 bus_generic_attach(sc->mfi_dev);
1876 mtx_unlock(&Giant);
1877 mtx_lock(&sc->mfi_io_lock);
1878}
1879
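/*
 * Fetch the firmware's info for a single physical drive; the completion
 * routine attaches an mfisyspd child only if it is a SYSTEM PD.
 */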
1880static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1881{
1882 struct mfi_command *cm;
1883 struct mfi_dcmd_frame *dcmd = NULL;
1884 struct mfi_pd_info *pd_info = NULL;
1885 int error;
1886
1887 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1888
1889 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1890 (void **)&pd_info, sizeof(*pd_info));
1891 if (error) {
1892 device_printf(sc->mfi_dev,
 1893 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1894 error);
1895 if (pd_info)
1896 free(pd_info, M_MFIBUF);
1897 return (error);
1898 }
1899 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1900 dcmd = &cm->cm_frame->dcmd;
 1901 	dcmd->mbox[0] = id;
1902 dcmd->header.scsi_status = 0;
1903 dcmd->header.pad0 = 0;
1904 if (mfi_mapcmd(sc, cm) != 0) {
1905 device_printf(sc->mfi_dev,
1906 "Failed to get physical drive info %d\n", id);
1907 free(pd_info, M_MFIBUF);
1908 return (0);
1909 }
1910 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1911 BUS_DMASYNC_POSTREAD);
1912 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1913 mfi_add_sys_pd_complete(cm);
1914 return (0);
1915}
1916
1917static void
1918mfi_add_sys_pd_complete(struct mfi_command *cm)
1919{
1920 struct mfi_frame_header *hdr;
1921 struct mfi_pd_info *pd_info;
1922 struct mfi_softc *sc;
1923 device_t child;
1924
1925 sc = cm->cm_sc;
1926 hdr = &cm->cm_frame->header;
1927 pd_info = cm->cm_private;
1928
1929 if (hdr->cmd_status != MFI_STAT_OK) {
1930 free(pd_info, M_MFIBUF);
1931 mfi_release_command(cm);
1932 return;
1933 }
1934 if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1935 device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
1936 pd_info->ref.v.device_id);
1937 free(pd_info, M_MFIBUF);
1938 mfi_release_command(cm);
1939 return;
1940 }
1941 mfi_release_command(cm);
1942
1943 mtx_unlock(&sc->mfi_io_lock);
1944 mtx_lock(&Giant);
1945 if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1946 device_printf(sc->mfi_dev, "Failed to add system pd\n");
1947 free(pd_info, M_MFIBUF);
1948 mtx_unlock(&Giant);
1949 mtx_lock(&sc->mfi_io_lock);
1950 return;
1951 }
1952
1953 device_set_ivars(child, pd_info);
1954 device_set_desc(child, "MFI System PD");
1955 bus_generic_attach(sc->mfi_dev);
1956 mtx_unlock(&Giant);
1957 mtx_lock(&sc->mfi_io_lock);
1958}
1959static struct mfi_command *
1960mfi_bio_command(struct mfi_softc *sc)
1961{
1962 struct bio *bio;
1963 struct mfi_command *cm = NULL;
1964
1965	/* Reserve two commands to avoid starvation of ioctls. */
1966	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
1967 return (NULL);
1968 }
1969 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1970 return (NULL);
1971 }
1972 if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
1973 cm = mfi_build_ldio(sc, bio);
1974	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
1975 cm = mfi_build_syspdio(sc, bio);
1976 }
1977 if (!cm)
1978 mfi_enqueue_bio(sc, bio);
1979	return (cm);
1980}
1981static struct mfi_command *
1982mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1983{
1984 struct mfi_command *cm;
1985 struct mfi_pass_frame *pass;
1986 int flags = 0, blkcount = 0;
1987 uint32_t context = 0;
1988
1989 if ((cm = mfi_dequeue_free(sc)) == NULL)
1990 return (NULL);
1991
1992 /* Zero out the MFI frame */
1993 context = cm->cm_frame->header.context;
1994 bzero(cm->cm_frame, sizeof(union mfi_frame));
1995 cm->cm_frame->header.context = context;
1996 pass = &cm->cm_frame->pass;
1997 bzero(pass->cdb, 16);
1998 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
1999 switch (bio->bio_cmd & 0x03) {
2000 case BIO_READ:
2001#define SCSI_READ 0x28
2002 pass->cdb[0] = SCSI_READ;
2003 flags = MFI_CMD_DATAIN;
2004 break;
2005 case BIO_WRITE:
2006#define SCSI_WRITE 0x2a
2007 pass->cdb[0] = SCSI_WRITE;
2008 flags = MFI_CMD_DATAOUT;
2009 break;
2010 default:
2011 panic("Invalid bio command");
2012 }
2013
2014 /* Cheat with the sector length to avoid a non-constant division */
2015 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
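	/*
	 * For example, assuming MFI_SECTOR_LEN is 512: a 4600-byte request
	 * rounds up to (4600 + 511) / 512 = 9 blocks.
	 */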
2016 /* Fill the LBA and Transfer length in CDB */
2017 pass->cdb[2] = (bio->bio_pblkno & 0xff000000) >> 24;
2018 pass->cdb[3] = (bio->bio_pblkno & 0x00ff0000) >> 16;
2019 pass->cdb[4] = (bio->bio_pblkno & 0x0000ff00) >> 8;
2020 pass->cdb[5] = bio->bio_pblkno & 0x000000ff;
2021 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2022 pass->cdb[8] = (blkcount & 0x00ff);
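	/*
	 * The assignments above build a standard 10-byte READ(10)/WRITE(10)
	 * CDB: bytes 2-5 hold the LBA and bytes 7-8 the transfer length,
	 * both big-endian, which limits this path to 32-bit LBAs.
	 */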
2023 pass->header.target_id = (uintptr_t)bio->bio_driver1;
2024 pass->header.timeout = 0;
2025 pass->header.flags = 0;
2026 pass->header.scsi_status = 0;
2027 pass->header.sense_len = MFI_SENSE_LEN;
2028 pass->header.data_len = bio->bio_bcount;
2029 pass->header.cdb_len = 10;
2030 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2031 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2032 cm->cm_complete = mfi_bio_complete;
2033 cm->cm_private = bio;
2034 cm->cm_data = bio->bio_data;
2035 cm->cm_len = bio->bio_bcount;
2036 cm->cm_sg = &pass->sgl;
2037 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2038 cm->cm_flags = flags;
2039 return (cm);
2040}
2041
2042static struct mfi_command *
2043mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2044{
2045 struct mfi_io_frame *io;
2046 struct mfi_command *cm;
2047 int flags, blkcount;
2048 uint32_t context = 0;
2049
2050 if ((cm = mfi_dequeue_free(sc)) == NULL)
2051 return (NULL);
2052
2053 /* Zero out the MFI frame */
2054 context = cm->cm_frame->header.context;
2055 bzero(cm->cm_frame, sizeof(union mfi_frame));
2056 cm->cm_frame->header.context = context;
2057 io = &cm->cm_frame->io;
2058 switch (bio->bio_cmd & 0x03) {
2059 case BIO_READ:
2060 io->header.cmd = MFI_CMD_LD_READ;
2061 flags = MFI_CMD_DATAIN;
2062 break;
2063 case BIO_WRITE:
2064 io->header.cmd = MFI_CMD_LD_WRITE;
2065 flags = MFI_CMD_DATAOUT;
2066 break;
2067 default:
2068 panic("Invalid bio command");
2069 }
2070
2071 /* Cheat with the sector length to avoid a non-constant division */
2072 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2073 io->header.target_id = (uintptr_t)bio->bio_driver1;
2074 io->header.timeout = 0;
2075 io->header.flags = 0;
2076 io->header.scsi_status = 0;
2077 io->header.sense_len = MFI_SENSE_LEN;
2078 io->header.data_len = blkcount;
2079 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2080 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2081 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2082 io->lba_lo = bio->bio_pblkno & 0xffffffff;
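	/*
	 * Example of the split above: pblkno 0x123456789a yields
	 * lba_hi = 0x12 and lba_lo = 0x3456789a.
	 */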
2083 cm->cm_complete = mfi_bio_complete;
2084 cm->cm_private = bio;
2085 cm->cm_data = bio->bio_data;
2086 cm->cm_len = bio->bio_bcount;
2087 cm->cm_sg = &io->sgl;
2088 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2089 cm->cm_flags = flags;
2090 return (cm);
2091}
2092
2093static void
2094mfi_bio_complete(struct mfi_command *cm)
2095{
2096 struct bio *bio;
2097 struct mfi_frame_header *hdr;
2098 struct mfi_softc *sc;
2099
2100 bio = cm->cm_private;
2101 hdr = &cm->cm_frame->header;
2102 sc = cm->cm_sc;
2103
2104 if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2105 bio->bio_flags |= BIO_ERROR;
2106 bio->bio_error = EIO;
2107		device_printf(sc->mfi_dev, "I/O error, status = %d "
2108		    "scsi_status = %d\n", hdr->cmd_status, hdr->scsi_status);
2109 mfi_print_sense(cm->cm_sc, cm->cm_sense);
2110 } else if (cm->cm_error != 0) {
2111 bio->bio_flags |= BIO_ERROR;
2112 }
2113
2114 mfi_release_command(cm);
2115 mfi_disk_complete(bio);
2116}
2117
2118void
2119mfi_startio(struct mfi_softc *sc)
2120{
2121 struct mfi_command *cm;
2122 struct ccb_hdr *ccbh;
2123
2124 for (;;) {
2125 /* Don't bother if we're short on resources */
2126 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2127 break;
2128
2129 /* Try a command that has already been prepared */
2130 cm = mfi_dequeue_ready(sc);
2131
2132 if (cm == NULL) {
2133 if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2134 cm = sc->mfi_cam_start(ccbh);
2135 }
2136
2137 /* Nope, so look for work on the bioq */
2138 if (cm == NULL)
2139 cm = mfi_bio_command(sc);
2140
2141 /* No work available, so exit */
2142 if (cm == NULL)
2143 break;
2144
2145 /* Send the command to the controller */
2146 if (mfi_mapcmd(sc, cm) != 0) {
2147 mfi_requeue_ready(cm);
2148 break;
2149 }
2150 }
2151}
2152
2153int
2154mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2155{
2156 int error, polled;
2157
2158 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2159
2160	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2161 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2162 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2163 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
2164 if (error == EINPROGRESS) {
2165 sc->mfi_flags |= MFI_FLAGS_QFRZN;
2166 return (0);
2167 }
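		/*
		 * EINPROGRESS means bus_dmamap_load() deferred the mapping;
		 * mfi_data_cb() will send the frame once resources are
		 * available, and QFRZN keeps mfi_startio() from issuing
		 * more work until then.
		 */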
2168 } else {
2169 if (sc->MFA_enabled)
2170 error = mfi_tbolt_send_frame(sc, cm);
2171 else
2172 error = mfi_send_frame(sc, cm);
2173 }
2174
2175 return (error);
2176}
2177
2178static void
2179mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2180{
2181 struct mfi_frame_header *hdr;
2182 struct mfi_command *cm;
2183 union mfi_sgl *sgl;
2184 struct mfi_softc *sc;
2185 int i, j, first, dir;
2186 int sge_size;
2187
2188 cm = (struct mfi_command *)arg;
2189 sc = cm->cm_sc;
2190 hdr = &cm->cm_frame->header;
2191 sgl = cm->cm_sg;
2192
2193 if (error) {
2194 printf("error %d in callback\n", error);
2195 cm->cm_error = error;
2196 mfi_complete(sc, cm);
2197 return;
2198 }
2199	/* Use the IEEE SGL only for I/O on a SKINNY controller.
2200	 * For other commands on a SKINNY controller use either
2201	 * sg32 or sg64, based on sizeof(bus_addr_t).
2202 * Also calculate the total frame size based on the type
2203 * of SGL used.
2204 */
2205 if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2206 (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2207 (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2208 (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2209 for (i = 0; i < nsegs; i++) {
2210 sgl->sg_skinny[i].addr = segs[i].ds_addr;
2211 sgl->sg_skinny[i].len = segs[i].ds_len;
2212 sgl->sg_skinny[i].flag = 0;
2213 }
2214 hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2215 sge_size = sizeof(struct mfi_sg_skinny);
2216 hdr->sg_count = nsegs;
2217 } else {
2218 j = 0;
2219 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2220 first = cm->cm_stp_len;
2221 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2222 sgl->sg32[j].addr = segs[0].ds_addr;
2223 sgl->sg32[j++].len = first;
2224 } else {
2225 sgl->sg64[j].addr = segs[0].ds_addr;
2226 sgl->sg64[j++].len = first;
2227 }
2228 } else
2229 first = 0;
2230 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2231 for (i = 0; i < nsegs; i++) {
2232 sgl->sg32[j].addr = segs[i].ds_addr + first;
2233 sgl->sg32[j++].len = segs[i].ds_len - first;
2234 first = 0;
2235 }
2236 } else {
2237 for (i = 0; i < nsegs; i++) {
2238 sgl->sg64[j].addr = segs[i].ds_addr + first;
2239 sgl->sg64[j++].len = segs[i].ds_len - first;
2240 first = 0;
2241 }
2242 hdr->flags |= MFI_FRAME_SGL64;
2243 }
2244 hdr->sg_count = j;
2245 sge_size = sc->mfi_sge_size;
2246 }
2247
2248 dir = 0;
2249 if (cm->cm_flags & MFI_CMD_DATAIN) {
2250 dir |= BUS_DMASYNC_PREREAD;
2251 hdr->flags |= MFI_FRAME_DIR_READ;
2252 }
2253 if (cm->cm_flags & MFI_CMD_DATAOUT) {
2254 dir |= BUS_DMASYNC_PREWRITE;
2255 hdr->flags |= MFI_FRAME_DIR_WRITE;
2256 }
2257 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2258 cm->cm_flags |= MFI_CMD_MAPPED;
2259
2260 /*
2261 * Instead of calculating the total number of frames in the
2262 * compound frame, it's already assumed that there will be at
2263 * least 1 frame, so don't compensate for the modulo of the
2264 * following division.
2265 */
2266 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2267 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
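	/*
	 * For example, assuming MFI_FRAME_SIZE is 64: a command whose base
	 * frame plus SG list totals 168 bytes gets (168 - 1) / 64 = 2
	 * extra frames beyond the first.
	 */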
2268
2269 if (sc->MFA_enabled)
2270 mfi_tbolt_send_frame(sc, cm);
2271 else
2272 mfi_send_frame(sc, cm);
2273
2274 return;
2275}
2276
2277static int
2278mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2279{
2280 struct mfi_frame_header *hdr;
2281 int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2282
2283 hdr = &cm->cm_frame->header;
2284
2285 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2286 cm->cm_timestamp = time_uptime;
2287 mfi_enqueue_busy(cm);
2288 } else {
2289 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2290 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2291 }
2292
2293 /*
2294 * The bus address of the command is aligned on a 64 byte boundary,
2295	 * leaving the least significant 6 bits as zero. For whatever reason,
2296 * hardware wants the address shifted right by three, leaving just
2297 * 3 zero bits. These three bits are then used as a prefetching
2298 * hint for the hardware to predict how many frames need to be
2299 * fetched across the bus. If a command has more than 8 frames
2300 * then the 3 bits are set to 0x7 and the firmware uses other
2301 * information in the command to determine the total amount to fetch.
2302 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2303 * is enough for both 32bit and 64bit systems.
2304 */
2305 if (cm->cm_extra_frames > 7)
2306 cm->cm_extra_frames = 7;
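	/*
	 * e.g. a command needing 10 extra frames is issued with the hint
	 * clamped to 7 and the firmware derives the real length from the
	 * frame contents, as described above.
	 */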
2307
2308 sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2309
2310 if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2311 return (0);
2312
2313 /* This is a polled command, so busy-wait for it to complete. */
2314 while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2315 DELAY(1000);
2316 tm -= 1;
2317 if (tm <= 0)
2318 break;
2319 }
2320
2321 if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2322		device_printf(sc->mfi_dev, "Frame %p timed out, "
2323		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2324 return (ETIMEDOUT);
2325 }
2326
2327 return (0);
2328}
2329
2330
2331void
2332mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2333{
2334 int dir;
2335
2336 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2337 dir = 0;
2338 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2339 (cm->cm_frame->header.cmd == MFI_CMD_STP))
2340 dir |= BUS_DMASYNC_POSTREAD;
2341 if (cm->cm_flags & MFI_CMD_DATAOUT)
2342 dir |= BUS_DMASYNC_POSTWRITE;
2343
2344 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2345 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2346 cm->cm_flags &= ~MFI_CMD_MAPPED;
2347 }
2348
2349 cm->cm_flags |= MFI_CMD_COMPLETED;
2350
2351 if (cm->cm_complete != NULL)
2352 cm->cm_complete(cm);
2353 else
2354 wakeup(cm);
2355}
2356
2357static int
2358mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2359{
2360 struct mfi_command *cm;
2361 struct mfi_abort_frame *abort;
2362 int i = 0;
2363 uint32_t context = 0;
2364
2365 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2366
2367 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2368 return (EBUSY);
2369 }
2370
2371 /* Zero out the MFI frame */
2372 context = cm->cm_frame->header.context;
2373 bzero(cm->cm_frame, sizeof(union mfi_frame));
2374 cm->cm_frame->header.context = context;
2375
2376 abort = &cm->cm_frame->abort;
2377 abort->header.cmd = MFI_CMD_ABORT;
2378 abort->header.flags = 0;
2379 abort->header.scsi_status = 0;
2380 abort->abort_context = cm_abort->cm_frame->header.context;
2381 abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2382 abort->abort_mfi_addr_hi =
2383 (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2384 cm->cm_data = NULL;
2385 cm->cm_flags = MFI_CMD_POLLED;
2386
2387 if (sc->mfi_aen_cm)
2388 sc->mfi_aen_cm->cm_aen_abort = 1;
2389 mfi_mapcmd(sc, cm);
2390 mfi_release_command(cm);
2391
2392 while (i < 5 && sc->mfi_aen_cm != NULL) {
2393 msleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2394 5 * hz);
2395 i++;
2396 }
2397
2398 return (0);
2399}
2400
2401int
2402mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2403 int len)
2404{
2405 struct mfi_command *cm;
2406 struct mfi_io_frame *io;
2407 int error;
2408 uint32_t context = 0;
2409
2410 if ((cm = mfi_dequeue_free(sc)) == NULL)
2411 return (EBUSY);
2412
2413 /* Zero out the MFI frame */
2414 context = cm->cm_frame->header.context;
2415 bzero(cm->cm_frame, sizeof(union mfi_frame));
2416 cm->cm_frame->header.context = context;
2417
2418 io = &cm->cm_frame->io;
2419 io->header.cmd = MFI_CMD_LD_WRITE;
2420 io->header.target_id = id;
2421 io->header.timeout = 0;
2422 io->header.flags = 0;
2423 io->header.scsi_status = 0;
2424 io->header.sense_len = MFI_SENSE_LEN;
2425 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2426 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2427 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2428 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2429 io->lba_lo = lba & 0xffffffff;
2430 cm->cm_data = virt;
2431 cm->cm_len = len;
2432 cm->cm_sg = &io->sgl;
2433 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2434 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
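	/*
	 * This routine is used for crash dumps (e.g. from the mfid dump
	 * method), so the command is polled; interrupts may no longer be
	 * serviced by the time a dump is written.
	 */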
2435
2436 error = mfi_mapcmd(sc, cm);
2437 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2438 BUS_DMASYNC_POSTWRITE);
2439 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2440 mfi_release_command(cm);
2441
2442 return (error);
2443}
2444
2445int
2446mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2447 int len)
2448{
2449 struct mfi_command *cm;
2450 struct mfi_pass_frame *pass;
2451 int error;
2452 int blkcount = 0;
2453
2454 if ((cm = mfi_dequeue_free(sc)) == NULL)
2455 return (EBUSY);
2456
2457 pass = &cm->cm_frame->pass;
2458 bzero(pass->cdb, 16);
2459 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2460 pass->cdb[0] = SCSI_WRITE;
2461 pass->cdb[2] = (lba & 0xff000000) >> 24;
2462 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2463 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2464 pass->cdb[5] = (lba & 0x000000ff);
2465 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2466 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2467 pass->cdb[8] = (blkcount & 0x00ff);
2468 pass->header.target_id = id;
2469 pass->header.timeout = 0;
2470 pass->header.flags = 0;
2471 pass->header.scsi_status = 0;
2472 pass->header.sense_len = MFI_SENSE_LEN;
2473 pass->header.data_len = len;
2474 pass->header.cdb_len = 10;
2475 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2476 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2477 cm->cm_data = virt;
2478 cm->cm_len = len;
2479 cm->cm_sg = &pass->sgl;
2480 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2481 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2482
2483 error = mfi_mapcmd(sc, cm);
2484 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2485 BUS_DMASYNC_POSTWRITE);
2486 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2487 mfi_release_command(cm);
2488
2489 return (error);
2490}
2491
2492static int
2493mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2494{
2495 struct mfi_softc *sc;
2496 int error;
2497
2498 sc = dev->si_drv1;
2499
2500 mtx_lock(&sc->mfi_io_lock);
2501 if (sc->mfi_detaching)
2502 error = ENXIO;
2503 else {
2504 sc->mfi_flags |= MFI_FLAGS_OPEN;
2505 error = 0;
2506 }
2507 mtx_unlock(&sc->mfi_io_lock);
2508
2509 return (error);
2510}
2511
2512static int
2513mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2514{
2515 struct mfi_softc *sc;
2516 struct mfi_aen *mfi_aen_entry, *tmp;
2517
2518 sc = dev->si_drv1;
2519
2520 mtx_lock(&sc->mfi_io_lock);
2521 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2522
2523 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2524 if (mfi_aen_entry->p == curproc) {
2525 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2526 aen_link);
2527 free(mfi_aen_entry, M_MFIBUF);
2528 }
2529 }
2530 mtx_unlock(&sc->mfi_io_lock);
2531 return (0);
2532}
2533
2534static int
2535mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2536{
2537
2538 switch (opcode) {
2539 case MFI_DCMD_LD_DELETE:
2540 case MFI_DCMD_CFG_ADD:
2541 case MFI_DCMD_CFG_CLEAR:
2542 sx_xlock(&sc->mfi_config_lock);
2543 return (1);
2544 default:
2545 return (0);
2546 }
2547}
2548
2549static void
2550mfi_config_unlock(struct mfi_softc *sc, int locked)
2551{
2552
2553 if (locked)
2554 sx_xunlock(&sc->mfi_config_lock);
2555}
2556
2557/*
2558 * Perform pre-issue checks on commands from userland and possibly veto
2559 * them.
2560 */
2561static int
2562mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2563{
2564 struct mfi_disk *ld, *ld2;
2565 int error;
2566 struct mfi_system_pd *syspd = NULL;
2567 uint16_t syspd_id;
2568 uint16_t *mbox;
2569
2570 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2571 error = 0;
2572 switch (cm->cm_frame->dcmd.opcode) {
2573 case MFI_DCMD_LD_DELETE:
2574 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2575 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2576 break;
2577 }
2578 if (ld == NULL)
2579 error = ENOENT;
2580 else
2581 error = mfi_disk_disable(ld);
2582 break;
2583 case MFI_DCMD_CFG_CLEAR:
2584 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2585 error = mfi_disk_disable(ld);
2586 if (error)
2587 break;
2588 }
2589 if (error) {
2590 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2591 if (ld2 == ld)
2592 break;
2593 mfi_disk_enable(ld2);
2594 }
2595 }
2596 break;
2597 case MFI_DCMD_PD_STATE_SET:
2598 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
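		/*
		 * The DCMD mailbox for PD_STATE_SET appears to carry the
		 * device id in the first 16-bit word and the requested
		 * state in mbox[2]; only a transition to UNCONFIGURED_GOOD
		 * requires the syspd to be disabled first.
		 */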
2599 syspd_id = mbox[0];
2600 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2601 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2602 if (syspd->pd_id == syspd_id)
2603 break;
2604 }
2605 }
2606 else
2607 break;
2608 if (syspd)
2609 error = mfi_syspd_disable(syspd);
2610 break;
2611 default:
2612 break;
2613 }
2614 return (error);
2615}
2616
2617/* Perform post-issue checks on commands from userland. */
2618static void
2619mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2620{
2621 struct mfi_disk *ld, *ldn;
2622 struct mfi_system_pd *syspd = NULL;
2623 uint16_t syspd_id;
2624 uint16_t *mbox;
2625
2626 switch (cm->cm_frame->dcmd.opcode) {
2627 case MFI_DCMD_LD_DELETE:
2628 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2629 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2630 break;
2631 }
2632		KASSERT(ld != NULL, ("volume disappeared"));
2633 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2634 mtx_unlock(&sc->mfi_io_lock);
2635 mtx_lock(&Giant);
2636 device_delete_child(sc->mfi_dev, ld->ld_dev);
2637 mtx_unlock(&Giant);
2638 mtx_lock(&sc->mfi_io_lock);
2639 } else
2640 mfi_disk_enable(ld);
2641 break;
2642 case MFI_DCMD_CFG_CLEAR:
2643 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2644 mtx_unlock(&sc->mfi_io_lock);
2645 mtx_lock(&Giant);
2646 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2647 device_delete_child(sc->mfi_dev, ld->ld_dev);
2648 }
2649 mtx_unlock(&Giant);
2650 mtx_lock(&sc->mfi_io_lock);
2651 } else {
2652 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2653 mfi_disk_enable(ld);
2654 }
2655 break;
2656 case MFI_DCMD_CFG_ADD:
2657 mfi_ldprobe(sc);
2658 break;
2659 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2660 mfi_ldprobe(sc);
2661 break;
2662 case MFI_DCMD_PD_STATE_SET:
2663 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2664 syspd_id = mbox[0];
2665 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2666			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2667 if (syspd->pd_id == syspd_id)
2668 break;
2669 }
2670 }
2671 else
2672 break;
2673 /* If the transition fails then enable the syspd again */
2674 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2675 mfi_syspd_enable(syspd);
2676 break;
2677 }
2678}
2679
2680static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2681{
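	/*
	 * Decide whether this ioctl touches an SSCD (SSD cache) volume;
	 * if it does, the caller skips the usual pre/post disk hooks,
	 * since no mfid child exists for such a volume.
	 */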
2682	struct mfi_config_data *conf_data = (struct mfi_config_data *)cm->cm_data;
2683 struct mfi_command *ld_cm = NULL;
2684 struct mfi_ld_info *ld_info = NULL;
2685 int error = 0;
2686
2687 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2688	    (conf_data->ld[0].params.isSSCD == 1)) {
2689 error = 1;
2690 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2691		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2692		    (void **)&ld_info, sizeof(*ld_info));
2693		if (error) {
2694			device_printf(sc->mfi_dev, "Failed to allocate "
2695			    "MFI_DCMD_LD_GET_INFO %d", error);
2696 if (ld_info)
2697 free(ld_info, M_MFIBUF);
2698 return 0;
2699 }
2700 ld_cm->cm_flags = MFI_CMD_DATAIN;
2701		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2702		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2703		if (mfi_wait_command(sc, ld_cm) != 0) {
2704 device_printf(sc->mfi_dev, "failed to get log drv\n");
2705 mfi_release_command(ld_cm);
2706 free(ld_info, M_MFIBUF);
2707 return 0;
2708 }
2709
2710 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2711 free(ld_info, M_MFIBUF);
2712 mfi_release_command(ld_cm);
2713 return 0;
2714 }
2715 else
2716 ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2717
2718 if (ld_info->ld_config.params.isSSCD == 1)
2719 error = 1;
2720
2721 mfi_release_command(ld_cm);
2722 free(ld_info, M_MFIBUF);
2723
2724 }
2725 return error;
2726}
2727
2728static int
2729mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2730{
2731 uint8_t i;
2732	struct mfi_ioc_packet *ioc;
2733	int sge_size, error;
2734	struct megasas_sge *kern_sge;
2735
2736	ioc = (struct mfi_ioc_packet *)arg;
2737 memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2738	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2739 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2740
2741 if (sizeof(bus_addr_t) == 8) {
2742 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2743 cm->cm_extra_frames = 2;
2744 sge_size = sizeof(struct mfi_sg64);
2745 } else {
2746 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2747 sge_size = sizeof(struct mfi_sg32);
2748 }
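	/*
	 * Each SGE is a struct mfi_sg64 on a 64-bit bus (an 8-byte address
	 * plus a 4-byte length, 12 bytes assuming the packed MFI layout)
	 * or a struct mfi_sg32 (8 bytes) on a 32-bit bus; the frame grows
	 * below by one SGE per user iovec.
	 */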
2749
2750 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2751 for (i = 0; i < ioc->mfi_sge_count; i++) {
2752 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2753 1, 0, /* algnmnt, boundary */
2754 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2755 BUS_SPACE_MAXADDR, /* highaddr */
2756 NULL, NULL, /* filter, filterarg */
2757 ioc->mfi_sgl[i].iov_len,/* maxsize */
2758 2, /* nsegments */
2759 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2760 BUS_DMA_ALLOCNOW, /* flags */
2761 NULL, NULL, /* lockfunc, lockarg */
2762 &sc->mfi_kbuff_arr_dmat[i])) {
2763 device_printf(sc->mfi_dev,
2764 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2765 return (ENOMEM);
2766 }
2767
2768 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2769 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2770 &sc->mfi_kbuff_arr_dmamap[i])) {
2771 device_printf(sc->mfi_dev,
2772 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2773 return (ENOMEM);
2774 }
2775
2776 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2777 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2778 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2779 &sc->mfi_kbuff_arr_busaddr[i], 0);
2780
2781 if (!sc->kbuff_arr[i]) {
2782 device_printf(sc->mfi_dev,
2783 "Could not allocate memory for kbuff_arr info\n");
2784 return -1;
2785 }
2786 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2787 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2788
2789 if (sizeof(bus_addr_t) == 8) {
2790 cm->cm_frame->stp.sgl.sg64[i].addr =
2791 kern_sge[i].phys_addr;
2792 cm->cm_frame->stp.sgl.sg64[i].len =
2793 ioc->mfi_sgl[i].iov_len;
2794 } else {
2795			cm->cm_frame->stp.sgl.sg32[i].addr =
2796 kern_sge[i].phys_addr;
2797 cm->cm_frame->stp.sgl.sg32[i].len =
2798 ioc->mfi_sgl[i].iov_len;
2799 }
2800
2801 error = copyin(ioc->mfi_sgl[i].iov_base,
2802 sc->kbuff_arr[i],
2803 ioc->mfi_sgl[i].iov_len);
2804 if (error != 0) {
2805 device_printf(sc->mfi_dev, "Copy in failed\n");
2806 return error;
2807 }
2808 }
2809
2810	cm->cm_flags |= MFI_CMD_MAPPED;
2811 return 0;
2812}
2813
2814static int
2815mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2816{
2817 struct mfi_command *cm;
2818 struct mfi_dcmd_frame *dcmd;
2819 void *ioc_buf = NULL;
2820 uint32_t context;
2821 int error = 0, locked;
2822
2823
2824 if (ioc->buf_size > 0) {
2825 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2826 if (ioc_buf == NULL) {
2827 return (ENOMEM);
2828 }
2829 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2830 if (error) {
2831 device_printf(sc->mfi_dev, "failed to copyin\n");
2832 free(ioc_buf, M_MFIBUF);
2833 return (error);
2834 }
2835 }
2836
2837 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2838
2839 mtx_lock(&sc->mfi_io_lock);
2840 while ((cm = mfi_dequeue_free(sc)) == NULL)
2841 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2842
2843 /* Save context for later */
2844 context = cm->cm_frame->header.context;
2845
2846 dcmd = &cm->cm_frame->dcmd;
2847 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2848
2849 cm->cm_sg = &dcmd->sgl;
2850 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2851 cm->cm_data = ioc_buf;
2852 cm->cm_len = ioc->buf_size;
2853
2854 /* restore context */
2855 cm->cm_frame->header.context = context;
2856
2857 /* Cheat since we don't know if we're writing or reading */
2858 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2859
2860 error = mfi_check_command_pre(sc, cm);
2861 if (error)
2862 goto out;
2863
2864 error = mfi_wait_command(sc, cm);
2865 if (error) {
2866 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2867 goto out;
2868 }
2869 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2870 mfi_check_command_post(sc, cm);
2871out:
2872 mfi_release_command(cm);
2873 mtx_unlock(&sc->mfi_io_lock);
2874 mfi_config_unlock(sc, locked);
2875 if (ioc->buf_size > 0)
2876 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2877 if (ioc_buf)
2878 free(ioc_buf, M_MFIBUF);
2879 return (error);
2880}
2881
2882#define PTRIN(p) ((void *)(uintptr_t)(p))
2883
2884static int
2885mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2886{
2887 struct mfi_softc *sc;
2888 union mfi_statrequest *ms;
2889 struct mfi_ioc_packet *ioc;
2890#ifdef COMPAT_FREEBSD32
2891 struct mfi_ioc_packet32 *ioc32;
2892#endif
2893 struct mfi_ioc_aen *aen;
2894 struct mfi_command *cm = NULL;
2895 uint32_t context = 0;
2896 union mfi_sense_ptr sense_ptr;
2897 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2898 size_t len;
2899 int i, res;
2900 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2901#ifdef COMPAT_FREEBSD32
2902 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2903 struct mfi_ioc_passthru iop_swab;
2904#endif
2905 int error, locked;
2906 union mfi_sgl *sgl;
2907 sc = dev->si_drv1;
2908 error = 0;
2909
2910 if (sc->adpreset)
2911 return EBUSY;
2912
2913 if (sc->hw_crit_error)
2914 return EBUSY;
2915
2916 if (sc->issuepend_done == 0)
2917 return EBUSY;
2918
2919 switch (cmd) {
2920 case MFIIO_STATS:
2921 ms = (union mfi_statrequest *)arg;
2922 switch (ms->ms_item) {
2923 case MFIQ_FREE:
2924 case MFIQ_BIO:
2925 case MFIQ_READY:
2926 case MFIQ_BUSY:
2927 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2928 sizeof(struct mfi_qstat));
2929 break;
2930 default:
2931 error = ENOIOCTL;
2932 break;
2933 }
2934 break;
2935 case MFIIO_QUERY_DISK:
2936 {
2937 struct mfi_query_disk *qd;
2938 struct mfi_disk *ld;
2939
2940 qd = (struct mfi_query_disk *)arg;
2941 mtx_lock(&sc->mfi_io_lock);
2942 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2943 if (ld->ld_id == qd->array_id)
2944 break;
2945 }
2946 if (ld == NULL) {
2947 qd->present = 0;
2948 mtx_unlock(&sc->mfi_io_lock);
2949 return (0);
2950 }
2951 qd->present = 1;
2952 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2953 qd->open = 1;
2954 bzero(qd->devname, SPECNAMELEN + 1);
2955 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2956 mtx_unlock(&sc->mfi_io_lock);
2957 break;
2958 }
2959 case MFI_CMD:
2960#ifdef COMPAT_FREEBSD32
2961 case MFI_CMD32:
2962#endif
2963 {
2964 devclass_t devclass;
2965 ioc = (struct mfi_ioc_packet *)arg;
2966 int adapter;
2967
2968 adapter = ioc->mfi_adapter_no;
2969 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2970 devclass = devclass_find("mfi");
2971 sc = devclass_get_softc(devclass, adapter);
2972 }
2973 mtx_lock(&sc->mfi_io_lock);
2974 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2975 mtx_unlock(&sc->mfi_io_lock);
2976 return (EBUSY);
2977 }
2978 mtx_unlock(&sc->mfi_io_lock);
2979 locked = 0;
2980
2981 /*
2982 * save off original context since copying from user
2983 * will clobber some data
2984 */
2985 context = cm->cm_frame->header.context;
2986 cm->cm_frame->header.context = cm->cm_index;
2987
2988 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2989 2 * MEGAMFI_FRAME_SIZE);
2990 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2991 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2992 cm->cm_frame->header.scsi_status = 0;
2993 cm->cm_frame->header.pad0 = 0;
2994 if (ioc->mfi_sge_count) {
2995 cm->cm_sg =
2996 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2997 }
2998 sgl = cm->cm_sg;
2999 cm->cm_flags = 0;
3000 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3001 cm->cm_flags |= MFI_CMD_DATAIN;
3002 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3003 cm->cm_flags |= MFI_CMD_DATAOUT;
3004 /* Legacy app shim */
3005 if (cm->cm_flags == 0)
3006 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3007 cm->cm_len = cm->cm_frame->header.data_len;
3008 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3009#ifdef COMPAT_FREEBSD32
3010 if (cmd == MFI_CMD) {
3011#endif
3012 /* Native */
3013 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3014#ifdef COMPAT_FREEBSD32
3015 } else {
3016 /* 32bit on 64bit */
3017 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3018 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3019 }
3020#endif
3021 cm->cm_len += cm->cm_stp_len;
3022 }
3023 if (cm->cm_len &&
3024 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3025 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3026 M_WAITOK | M_ZERO);
3027 if (cm->cm_data == NULL) {
3028 device_printf(sc->mfi_dev, "Malloc failed\n");
3029 goto out;
3030 }
3031 } else {
3032 cm->cm_data = 0;
3033 }
3034
3035 /* restore header context */
3036 cm->cm_frame->header.context = context;
3037
3038 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3039 res = mfi_stp_cmd(sc, cm, arg);
3040 if (res != 0)
3041 goto out;
3042 } else {
3043 temp = data;
3044 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3045 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3046 for (i = 0; i < ioc->mfi_sge_count; i++) {
3047#ifdef COMPAT_FREEBSD32
3048 if (cmd == MFI_CMD) {
3049#endif
3050 /* Native */
3051 addr = ioc->mfi_sgl[i].iov_base;
3052 len = ioc->mfi_sgl[i].iov_len;
3053#ifdef COMPAT_FREEBSD32
3054 } else {
3055 /* 32bit on 64bit */
3056 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3057 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3058 len = ioc32->mfi_sgl[i].iov_len;
3059 }
3060#endif
3061 error = copyin(addr, temp, len);
3062 if (error != 0) {
3063 device_printf(sc->mfi_dev,
3064 "Copy in failed\n");
3065 goto out;
3066 }
3067 temp = &temp[len];
3068 }
3069 }
3070 }
3071
3072 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3073 locked = mfi_config_lock(sc,
3074 cm->cm_frame->dcmd.opcode);
3075
3076 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3077 cm->cm_frame->pass.sense_addr_lo =
3078 (uint32_t)cm->cm_sense_busaddr;
3079 cm->cm_frame->pass.sense_addr_hi =
3080 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3081 }
3082 mtx_lock(&sc->mfi_io_lock);
3083 skip_pre_post = mfi_check_for_sscd (sc, cm);
3084 if (!skip_pre_post) {
3085 error = mfi_check_command_pre(sc, cm);
3086 if (error) {
3087 mtx_unlock(&sc->mfi_io_lock);
3088 goto out;
3089 }
3090 }
3091 if ((error = mfi_wait_command(sc, cm)) != 0) {
3092 device_printf(sc->mfi_dev,
3093			    "Controller poll failed\n");
3094 mtx_unlock(&sc->mfi_io_lock);
3095 goto out;
3096 }
3097 if (!skip_pre_post) {
3098 mfi_check_command_post(sc, cm);
3099 }
3100 mtx_unlock(&sc->mfi_io_lock);
3101
3102 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3103 temp = data;
3104 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3105 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3106 for (i = 0; i < ioc->mfi_sge_count; i++) {
3107#ifdef COMPAT_FREEBSD32
3108 if (cmd == MFI_CMD) {
3109#endif
3110 /* Native */
3111 addr = ioc->mfi_sgl[i].iov_base;
3112 len = ioc->mfi_sgl[i].iov_len;
3113#ifdef COMPAT_FREEBSD32
3114 } else {
3115 /* 32bit on 64bit */
3116 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3117 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3118 len = ioc32->mfi_sgl[i].iov_len;
3119 }
3120#endif
3121 error = copyout(temp, addr, len);
3122 if (error != 0) {
3123 device_printf(sc->mfi_dev,
3124 "Copy out failed\n");
3125 goto out;
3126 }
3127 temp = &temp[len];
3128 }
3129 }
3130 }
3131
3132 if (ioc->mfi_sense_len) {
3133 /* get user-space sense ptr then copy out sense */
3134 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3135 &sense_ptr.sense_ptr_data[0],
3136 sizeof(sense_ptr.sense_ptr_data));
3137#ifdef COMPAT_FREEBSD32
3138 if (cmd != MFI_CMD) {
3139 /*
3140 * not 64bit native so zero out any address
3141 * over 32bit */
3142 sense_ptr.addr.high = 0;
3143 }
3144#endif
3145 error = copyout(cm->cm_sense, sense_ptr.user_space,
3146 ioc->mfi_sense_len);
3147 if (error != 0) {
3148 device_printf(sc->mfi_dev,
3149 "Copy out failed\n");
3150 goto out;
3151 }
3152 }
3153
3154 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3155out:
3156 mfi_config_unlock(sc, locked);
3157 if (data)
3158 free(data, M_MFIBUF);
3159 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3160 for (i = 0; i < 2; i++) {
3161 if (sc->kbuff_arr[i]) {
3162 if (sc->mfi_kbuff_arr_busaddr != 0)
3163 bus_dmamap_unload(
3164 sc->mfi_kbuff_arr_dmat[i],
3165 sc->mfi_kbuff_arr_dmamap[i]
3166 );
3167 if (sc->kbuff_arr[i] != NULL)
3168 bus_dmamem_free(
3169 sc->mfi_kbuff_arr_dmat[i],
3170 sc->kbuff_arr[i],
3171 sc->mfi_kbuff_arr_dmamap[i]
3172 );
3173 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3174 bus_dma_tag_destroy(
3175 sc->mfi_kbuff_arr_dmat[i]);
3176 }
3177 }
3178 }
3179 if (cm) {
3180 mtx_lock(&sc->mfi_io_lock);
3181 mfi_release_command(cm);
3182 mtx_unlock(&sc->mfi_io_lock);
3183 }
3184
3185 break;
3186 }
3187 case MFI_SET_AEN:
3188 aen = (struct mfi_ioc_aen *)arg;
3189 error = mfi_aen_register(sc, aen->aen_seq_num,
3190 aen->aen_class_locale);
3191
3192 break;
3193 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3194 {
3195 devclass_t devclass;
3196 struct mfi_linux_ioc_packet l_ioc;
3197 int adapter;
3198
3199 devclass = devclass_find("mfi");
3200 if (devclass == NULL)
3201 return (ENOENT);
3202
3203 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3204 if (error)
3205 return (error);
3206 adapter = l_ioc.lioc_adapter_no;
3207 sc = devclass_get_softc(devclass, adapter);
3208 if (sc == NULL)
3209 return (ENOENT);
3210 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3211 cmd, arg, flag, td));
3212 break;
3213 }
3214 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3215 {
3216 devclass_t devclass;
3217 struct mfi_linux_ioc_aen l_aen;
3218 int adapter;
3219
3220 devclass = devclass_find("mfi");
3221 if (devclass == NULL)
3222 return (ENOENT);
3223
3224 error = copyin(arg, &l_aen, sizeof(l_aen));
3225 if (error)
3226 return (error);
3227 adapter = l_aen.laen_adapter_no;
3228 sc = devclass_get_softc(devclass, adapter);
3229 if (sc == NULL)
3230 return (ENOENT);
3231 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3232 cmd, arg, flag, td));
3233 break;
3234 }
3235#ifdef COMPAT_FREEBSD32
3236 case MFIIO_PASSTHRU32:
3237 iop_swab.ioc_frame = iop32->ioc_frame;
3238 iop_swab.buf_size = iop32->buf_size;
3239 iop_swab.buf = PTRIN(iop32->buf);
3240 iop = &iop_swab;
3241 /* FALLTHROUGH */
3242#endif
3243 case MFIIO_PASSTHRU:
3244 error = mfi_user_command(sc, iop);
3245#ifdef COMPAT_FREEBSD32
3246 if (cmd == MFIIO_PASSTHRU32)
3247 iop32->ioc_frame = iop_swab.ioc_frame;
3248#endif
3249 break;
3250 default:
3251 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3252 error = ENOENT;
3253 break;
3254 }
3255
3256 return (error);
3257}
3258
3259static int
3260mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3261{
3262 struct mfi_softc *sc;
3263 struct mfi_linux_ioc_packet l_ioc;
3264 struct mfi_linux_ioc_aen l_aen;
3265 struct mfi_command *cm = NULL;
3266 struct mfi_aen *mfi_aen_entry;
3267 union mfi_sense_ptr sense_ptr;
3268 uint32_t context = 0;
3269 uint8_t *data = NULL, *temp;
3270 int i;
3271 int error, locked;
3272
3273 sc = dev->si_drv1;
3274 error = 0;
3275 switch (cmd) {
3276 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3277 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3278 if (error != 0)
3279 return (error);
3280
3281 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3282 return (EINVAL);
3283 }
3284
3285 mtx_lock(&sc->mfi_io_lock);
3286 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3287 mtx_unlock(&sc->mfi_io_lock);
3288 return (EBUSY);
3289 }
3290 mtx_unlock(&sc->mfi_io_lock);
3291 locked = 0;
3292
3293 /*
3294 * save off original context since copying from user
3295 * will clobber some data
3296 */
3297 context = cm->cm_frame->header.context;
3298
3299 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3300 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3301 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3302 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3303 cm->cm_frame->header.scsi_status = 0;
3304 cm->cm_frame->header.pad0 = 0;
3305 if (l_ioc.lioc_sge_count)
3306 cm->cm_sg =
3307 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3308 cm->cm_flags = 0;
3309 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3310 cm->cm_flags |= MFI_CMD_DATAIN;
3311 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3312 cm->cm_flags |= MFI_CMD_DATAOUT;
3313 cm->cm_len = cm->cm_frame->header.data_len;
3314 if (cm->cm_len &&
3315 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3316 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3317 M_WAITOK | M_ZERO);
3318 if (cm->cm_data == NULL) {
3319 device_printf(sc->mfi_dev, "Malloc failed\n");
3320 goto out;
3321 }
3322 } else {
3323 cm->cm_data = 0;
3324 }
3325
3326 /* restore header context */
3327 cm->cm_frame->header.context = context;
3328
3329 temp = data;
3330 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3331 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3332 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3333 temp,
3334 l_ioc.lioc_sgl[i].iov_len);
3335 if (error != 0) {
3336 device_printf(sc->mfi_dev,
3337 "Copy in failed\n");
3338 goto out;
3339 }
3340 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3341 }
3342 }
3343
3344 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3345 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3346
3347 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3348 cm->cm_frame->pass.sense_addr_lo =
3349 (uint32_t)cm->cm_sense_busaddr;
3350 cm->cm_frame->pass.sense_addr_hi =
3351 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3352 }
3353
3354 mtx_lock(&sc->mfi_io_lock);
3355 error = mfi_check_command_pre(sc, cm);
3356 if (error) {
3357 mtx_unlock(&sc->mfi_io_lock);
3358 goto out;
3359 }
3360
3361 if ((error = mfi_wait_command(sc, cm)) != 0) {
3362 device_printf(sc->mfi_dev,
3363 "Controller polled failed\n");
3364 mtx_unlock(&sc->mfi_io_lock);
3365 goto out;
3366 }
3367
3368 mfi_check_command_post(sc, cm);
3369 mtx_unlock(&sc->mfi_io_lock);
3370
3371 temp = data;
3372 if (cm->cm_flags & MFI_CMD_DATAIN) {
3373 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3374 error = copyout(temp,
3375 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3376 l_ioc.lioc_sgl[i].iov_len);
3377 if (error != 0) {
3378 device_printf(sc->mfi_dev,
3379 "Copy out failed\n");
3380 goto out;
3381 }
3382 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3383 }
3384 }
3385
3386 if (l_ioc.lioc_sense_len) {
3387 /* get user-space sense ptr then copy out sense */
3388 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3389 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3390 &sense_ptr.sense_ptr_data[0],
3391 sizeof(sense_ptr.sense_ptr_data));
3392#ifdef __amd64__
3393 /*
3394 * only 32bit Linux support so zero out any
3395 * address over 32bit
3396 */
3397 sense_ptr.addr.high = 0;
3398#endif
3399 error = copyout(cm->cm_sense, sense_ptr.user_space,
3400 l_ioc.lioc_sense_len);
3401 if (error != 0) {
3402 device_printf(sc->mfi_dev,
3403 "Copy out failed\n");
3404 goto out;
3405 }
3406 }
3407
3408 error = copyout(&cm->cm_frame->header.cmd_status,
3409 &((struct mfi_linux_ioc_packet*)arg)
3410 ->lioc_frame.hdr.cmd_status,
3411 1);
3412 if (error != 0) {
3413 device_printf(sc->mfi_dev,
3414 "Copy out failed\n");
3415 goto out;
3416 }
3417
3418out:
3419 mfi_config_unlock(sc, locked);
3420 if (data)
3421 free(data, M_MFIBUF);
3422 if (cm) {
3423 mtx_lock(&sc->mfi_io_lock);
3424 mfi_release_command(cm);
3425 mtx_unlock(&sc->mfi_io_lock);
3426 }
3427
3428 return (error);
3429 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3430 error = copyin(arg, &l_aen, sizeof(l_aen));
3431 if (error != 0)
3432 return (error);
3433 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3434 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3435 M_WAITOK);
3436 mtx_lock(&sc->mfi_io_lock);
3437 if (mfi_aen_entry != NULL) {
3438 mfi_aen_entry->p = curproc;
3439 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3440 aen_link);
3441 }
3442 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3443 l_aen.laen_class_locale);
3444
3445 if (error != 0) {
3446 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3447 aen_link);
3448 free(mfi_aen_entry, M_MFIBUF);
3449 }
3450 mtx_unlock(&sc->mfi_io_lock);
3451
3452 return (error);
3453 default:
3454 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3455 error = ENOENT;
3456 break;
3457 }
3458
3459 return (error);
3460}
3461
3462static int
3463mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3464{
3465 struct mfi_softc *sc;
3466 int revents = 0;
3467
3468 sc = dev->si_drv1;
3469
3470 if (poll_events & (POLLIN | POLLRDNORM)) {
3471 if (sc->mfi_aen_triggered != 0) {
3472 revents |= poll_events & (POLLIN | POLLRDNORM);
3473 sc->mfi_aen_triggered = 0;
3474 }
3475 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3476 revents |= POLLERR;
3477 }
3478 }
3479
3480 if (revents == 0) {
3481 if (poll_events & (POLLIN | POLLRDNORM)) {
3482 sc->mfi_poll_waiting = 1;
3483 selrecord(td, &sc->mfi_select);
3484 }
3485 }
3486
3487 return revents;
3488}
3489
3490static void
3491mfi_dump_all(void)
3492{
3493 struct mfi_softc *sc;
3494 struct mfi_command *cm;
3495 devclass_t dc;
3496 time_t deadline;
3497 int timedout;
3498 int i;
3499
3500 dc = devclass_find("mfi");
3501 if (dc == NULL) {
3502 printf("No mfi dev class\n");
3503 return;
3504 }
3505
3506 for (i = 0; ; i++) {
3507 sc = devclass_get_softc(dc, i);
3508 if (sc == NULL)
3509 break;
3510 device_printf(sc->mfi_dev, "Dumping\n\n");
3511 timedout = 0;
3512 deadline = time_uptime - MFI_CMD_TIMEOUT;
3513 mtx_lock(&sc->mfi_io_lock);
3514 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3515 if (cm->cm_timestamp < deadline) {
3516 device_printf(sc->mfi_dev,
3517 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3518 cm, (int)(time_uptime - cm->cm_timestamp));
3519 MFI_PRINT_CMD(cm);
3520 timedout++;
3521 }
3522 }
3523
3524#if 0
3525 if (timedout)
3526 MFI_DUMP_CMDS(SC);
3527#endif
3528
3529 mtx_unlock(&sc->mfi_io_lock);
3530 }
3531
3532 return;
3533}
3534
3535static void
3536mfi_timeout(void *data)
3537{
3538 struct mfi_softc *sc = (struct mfi_softc *)data;
3539 struct mfi_command *cm;
3540 time_t deadline;
3541 int timedout = 0;
3542
3543 deadline = time_uptime - MFI_CMD_TIMEOUT;
3544 if (sc->adpreset == 0) {
3545 if (!mfi_tbolt_reset(sc)) {
3546 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3547 return;
3548 }
3549 }
3550 mtx_lock(&sc->mfi_io_lock);
3551 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
2406
2407 return (0);
2408}
2409
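/*
 * Write a run of sectors to logical disk `id' using a polled
 * MFI_CMD_LD_WRITE frame.  The polled path needs no interrupts or
 * sleeping, which suits the kernel crash-dump case; returns EBUSY
 * when no free command is available.
 */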
2410int
2411mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2412 int len)
2413{
2414 struct mfi_command *cm;
2415 struct mfi_io_frame *io;
2416 int error;
2417 uint32_t context = 0;
2418
2419 if ((cm = mfi_dequeue_free(sc)) == NULL)
2420 return (EBUSY);
2421
2422 /* Zero out the MFI frame */
2423 context = cm->cm_frame->header.context;
2424 bzero(cm->cm_frame, sizeof(union mfi_frame));
2425 cm->cm_frame->header.context = context;
2426
2427 io = &cm->cm_frame->io;
2428 io->header.cmd = MFI_CMD_LD_WRITE;
2429 io->header.target_id = id;
2430 io->header.timeout = 0;
2431 io->header.flags = 0;
2432 io->header.scsi_status = 0;
2433 io->header.sense_len = MFI_SENSE_LEN;
2434 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2435 io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2436 io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2437 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2438 io->lba_lo = lba & 0xffffffff;
2439 cm->cm_data = virt;
2440 cm->cm_len = len;
2441 cm->cm_sg = &io->sgl;
2442 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2443 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2444
2445 error = mfi_mapcmd(sc, cm);
2446 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2447 BUS_DMASYNC_POSTWRITE);
2448 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2449 mfi_release_command(cm);
2450
2451 return (error);
2452}
2453
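/*
 * Counterpart of mfi_dump_blocks() for system physical disks: the
 * write goes out as a polled SCSI WRITE(10) pass-through frame.  Only
 * cdb[2..5] carry the LBA, so addresses above 32 bits are truncated.
 */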
2454int
2455mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2456 int len)
2457{
2458 struct mfi_command *cm;
2459 struct mfi_pass_frame *pass;
2460 int error;
2461 int blkcount = 0;
2462
2463 if ((cm = mfi_dequeue_free(sc)) == NULL)
2464 return (EBUSY);
2465
2466 pass = &cm->cm_frame->pass;
2467 bzero(pass->cdb, 16);
2468 pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2469 pass->cdb[0] = SCSI_WRITE;
2470 pass->cdb[2] = (lba & 0xff000000) >> 24;
2471 pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2472 pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2473 pass->cdb[5] = (lba & 0x000000ff);
2474 blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2475 pass->cdb[7] = (blkcount & 0xff00) >> 8;
2476 pass->cdb[8] = (blkcount & 0x00ff);
2477 pass->header.target_id = id;
2478 pass->header.timeout = 0;
2479 pass->header.flags = 0;
2480 pass->header.scsi_status = 0;
2481 pass->header.sense_len = MFI_SENSE_LEN;
2482 pass->header.data_len = len;
2483 pass->header.cdb_len = 10;
2484 pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2485 pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2486 cm->cm_data = virt;
2487 cm->cm_len = len;
2488 cm->cm_sg = &pass->sgl;
2489 cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2490 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2491
2492 error = mfi_mapcmd(sc, cm);
2493 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2494 BUS_DMASYNC_POSTWRITE);
2495 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2496 mfi_release_command(cm);
2497
2498 return (error);
2499}
2500
2501static int
2502mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2503{
2504 struct mfi_softc *sc;
2505 int error;
2506
2507 sc = dev->si_drv1;
2508
2509 mtx_lock(&sc->mfi_io_lock);
2510 if (sc->mfi_detaching)
2511 error = ENXIO;
2512 else {
2513 sc->mfi_flags |= MFI_FLAGS_OPEN;
2514 error = 0;
2515 }
2516 mtx_unlock(&sc->mfi_io_lock);
2517
2518 return (error);
2519}
2520
2521static int
2522mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2523{
2524 struct mfi_softc *sc;
2525 struct mfi_aen *mfi_aen_entry, *tmp;
2526
2527 sc = dev->si_drv1;
2528
2529 mtx_lock(&sc->mfi_io_lock);
2530 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2531
2532 TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2533 if (mfi_aen_entry->p == curproc) {
2534 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2535 aen_link);
2536 free(mfi_aen_entry, M_MFIBUF);
2537 }
2538 }
2539 mtx_unlock(&sc->mfi_io_lock);
2540 return (0);
2541}
2542
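/*
 * Configuration-changing DCMDs (LD delete, config add/clear) must not
 * race with logical-drive attach and detach, so they take the softc's
 * sx lock.  The return value records whether the lock was taken and is
 * later passed back to mfi_config_unlock().
 */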
2543static int
2544mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2545{
2546
2547 switch (opcode) {
2548 case MFI_DCMD_LD_DELETE:
2549 case MFI_DCMD_CFG_ADD:
2550 case MFI_DCMD_CFG_CLEAR:
2551 sx_xlock(&sc->mfi_config_lock);
2552 return (1);
2553 default:
2554 return (0);
2555 }
2556}
2557
2558static void
2559mfi_config_unlock(struct mfi_softc *sc, int locked)
2560{
2561
2562 if (locked)
2563 sx_xunlock(&sc->mfi_config_lock);
2564}
2565
2566/*
2567 * Perform pre-issue checks on commands from userland and possibly veto
2568 * them.
2569 */
2570static int
2571mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2572{
2573 struct mfi_disk *ld, *ld2;
2574 int error;
2575 struct mfi_system_pd *syspd = NULL;
2576 uint16_t syspd_id;
2577 uint16_t *mbox;
2578
2579 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2580 error = 0;
2581 switch (cm->cm_frame->dcmd.opcode) {
2582 case MFI_DCMD_LD_DELETE:
2583 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2584 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2585 break;
2586 }
2587 if (ld == NULL)
2588 error = ENOENT;
2589 else
2590 error = mfi_disk_disable(ld);
2591 break;
2592 case MFI_DCMD_CFG_CLEAR:
2593 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2594 error = mfi_disk_disable(ld);
2595 if (error)
2596 break;
2597 }
2598 if (error) {
2599 TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2600 if (ld2 == ld)
2601 break;
2602 mfi_disk_enable(ld2);
2603 }
2604 }
2605 break;
2606 case MFI_DCMD_PD_STATE_SET:
2607 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2608 syspd_id = mbox[0];
2609 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2610 TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2611 if (syspd->pd_id == syspd_id)
2612 break;
2613 }
2614		} else
2616 break;
2617 if (syspd)
2618 error = mfi_syspd_disable(syspd);
2619 break;
2620 default:
2621 break;
2622 }
2623 return (error);
2624}
2625
2626/* Perform post-issue checks on commands from userland. */
2627static void
2628mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2629{
2630 struct mfi_disk *ld, *ldn;
2631 struct mfi_system_pd *syspd = NULL;
2632 uint16_t syspd_id;
2633 uint16_t *mbox;
2634
2635 switch (cm->cm_frame->dcmd.opcode) {
2636 case MFI_DCMD_LD_DELETE:
2637 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2638 if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2639 break;
2640 }
2641		KASSERT(ld != NULL, ("volume disappeared"));
2642 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2643 mtx_unlock(&sc->mfi_io_lock);
2644 mtx_lock(&Giant);
2645 device_delete_child(sc->mfi_dev, ld->ld_dev);
2646 mtx_unlock(&Giant);
2647 mtx_lock(&sc->mfi_io_lock);
2648 } else
2649 mfi_disk_enable(ld);
2650 break;
2651 case MFI_DCMD_CFG_CLEAR:
2652 if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2653 mtx_unlock(&sc->mfi_io_lock);
2654 mtx_lock(&Giant);
2655 TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2656 device_delete_child(sc->mfi_dev, ld->ld_dev);
2657 }
2658 mtx_unlock(&Giant);
2659 mtx_lock(&sc->mfi_io_lock);
2660 } else {
2661 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2662 mfi_disk_enable(ld);
2663 }
2664 break;
2665 case MFI_DCMD_CFG_ADD:
2666 mfi_ldprobe(sc);
2667 break;
2668 case MFI_DCMD_CFG_FOREIGN_IMPORT:
2669 mfi_ldprobe(sc);
2670 break;
2671 case MFI_DCMD_PD_STATE_SET:
2672 mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2673 syspd_id = mbox[0];
2674 if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2675			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2676 if (syspd->pd_id == syspd_id)
2677 break;
2678 }
2679		} else
2681 break;
2682 /* If the transition fails then enable the syspd again */
2683 if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2684 mfi_syspd_enable(syspd);
2685 break;
2686 }
2687}
2688
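/*
 * Returns nonzero when a user command creates or deletes an SSC
 * (CacheCade) logical drive.  mfi_ioctl() uses the result to bypass
 * the pre/post disk disable/enable checks for such volumes.
 */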
2689static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2690{
2691	struct mfi_config_data *conf_data =
	    (struct mfi_config_data *)cm->cm_data;
2692 struct mfi_command *ld_cm = NULL;
2693 struct mfi_ld_info *ld_info = NULL;
2694 int error = 0;
2695
2696 if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2697	    (conf_data->ld[0].params.isSSCD == 1)) {
2698 error = 1;
2699 } else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2700		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2701		    (void **)&ld_info, sizeof(*ld_info));
2702		if (error) {
2703			device_printf(sc->mfi_dev, "Failed to allocate "
2704			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2705 if (ld_info)
2706 free(ld_info, M_MFIBUF);
2707 return 0;
2708 }
2709 ld_cm->cm_flags = MFI_CMD_DATAIN;
2710		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2711		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2712		if (mfi_wait_command(sc, ld_cm) != 0) {
2713 device_printf(sc->mfi_dev, "failed to get log drv\n");
2714 mfi_release_command(ld_cm);
2715 free(ld_info, M_MFIBUF);
2716 return 0;
2717 }
2718
2719 if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2720 free(ld_info, M_MFIBUF);
2721 mfi_release_command(ld_cm);
2722 return 0;
2723		} else
2724			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2726
2727 if (ld_info->ld_config.params.isSSCD == 1)
2728 error = 1;
2729
2730 mfi_release_command(ld_cm);
2731 free(ld_info, M_MFIBUF);
2732
2733 }
2734 return error;
2735}
2736
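/*
 * Prepare an MFI_CMD_STP (system pass-through) ioctl: allocate a
 * DMA-able kernel bounce buffer for each user SGE, copy the user data
 * in, and rewrite the frame's scatter/gather list to point at those
 * buffers.  mfi_ioctl() releases the buffers once the command is done.
 */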
2737static int
2738mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2739{
2740	uint8_t i;
2741	struct mfi_ioc_packet *ioc;
2742	struct megasas_sge *kern_sge;
2743	int sge_size, error;
2744
2745	ioc = (struct mfi_ioc_packet *)arg;
2746	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2747	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
	    ioc->mfi_sgl_off);
2748 cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2749
2750 if (sizeof(bus_addr_t) == 8) {
2751 cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2752 cm->cm_extra_frames = 2;
2753 sge_size = sizeof(struct mfi_sg64);
2754 } else {
2755 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2756 sge_size = sizeof(struct mfi_sg32);
2757 }
2758
2759 cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2760 for (i = 0; i < ioc->mfi_sge_count; i++) {
2761 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
2762 1, 0, /* algnmnt, boundary */
2763 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2764 BUS_SPACE_MAXADDR, /* highaddr */
2765 NULL, NULL, /* filter, filterarg */
2766 ioc->mfi_sgl[i].iov_len,/* maxsize */
2767 2, /* nsegments */
2768 ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2769 BUS_DMA_ALLOCNOW, /* flags */
2770 NULL, NULL, /* lockfunc, lockarg */
2771 &sc->mfi_kbuff_arr_dmat[i])) {
2772 device_printf(sc->mfi_dev,
2773 "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2774 return (ENOMEM);
2775 }
2776
2777 if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2778 (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2779 &sc->mfi_kbuff_arr_dmamap[i])) {
2780 device_printf(sc->mfi_dev,
2781 "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2782 return (ENOMEM);
2783 }
2784
2785 bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2786 sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2787 ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2788 &sc->mfi_kbuff_arr_busaddr[i], 0);
2789
2790 if (!sc->kbuff_arr[i]) {
2791 device_printf(sc->mfi_dev,
2792 "Could not allocate memory for kbuff_arr info\n");
2793			return (ENOMEM);
2794 }
2795 kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2796 kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2797
2798 if (sizeof(bus_addr_t) == 8) {
2799 cm->cm_frame->stp.sgl.sg64[i].addr =
2800 kern_sge[i].phys_addr;
2801 cm->cm_frame->stp.sgl.sg64[i].len =
2802 ioc->mfi_sgl[i].iov_len;
2803 } else {
2804			cm->cm_frame->stp.sgl.sg32[i].addr =
2805			    kern_sge[i].phys_addr;
2806 cm->cm_frame->stp.sgl.sg32[i].len =
2807 ioc->mfi_sgl[i].iov_len;
2808 }
2809
2810 error = copyin(ioc->mfi_sgl[i].iov_base,
2811 sc->kbuff_arr[i],
2812 ioc->mfi_sgl[i].iov_len);
2813 if (error != 0) {
2814 device_printf(sc->mfi_dev, "Copy in failed\n");
2815			return (error);
2816 }
2817 }
2818
2819	cm->cm_flags |= MFI_CMD_MAPPED;
2820	return (0);
2821}
2822
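/*
 * Service MFIIO_PASSTHRU: copy in the caller's DCMD frame and optional
 * data buffer, run the command, and copy the results back out.  The
 * ioctl does not say which way the data flows, so the buffer is mapped
 * for both DATAIN and DATAOUT.
 */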
2823static int
2824mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2825{
2826 struct mfi_command *cm;
2827 struct mfi_dcmd_frame *dcmd;
2828 void *ioc_buf = NULL;
2829 uint32_t context;
2830 int error = 0, locked;
2831
2833 if (ioc->buf_size > 0) {
2834 ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2835 if (ioc_buf == NULL) {
2836 return (ENOMEM);
2837 }
2838 error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2839 if (error) {
2840 device_printf(sc->mfi_dev, "failed to copyin\n");
2841 free(ioc_buf, M_MFIBUF);
2842 return (error);
2843 }
2844 }
2845
2846 locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2847
2848 mtx_lock(&sc->mfi_io_lock);
2849 while ((cm = mfi_dequeue_free(sc)) == NULL)
2850 msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2851
2852 /* Save context for later */
2853 context = cm->cm_frame->header.context;
2854
2855 dcmd = &cm->cm_frame->dcmd;
2856 bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2857
2858 cm->cm_sg = &dcmd->sgl;
2859 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2860 cm->cm_data = ioc_buf;
2861 cm->cm_len = ioc->buf_size;
2862
2863 /* restore context */
2864 cm->cm_frame->header.context = context;
2865
2866 /* Cheat since we don't know if we're writing or reading */
2867 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2868
2869 error = mfi_check_command_pre(sc, cm);
2870 if (error)
2871 goto out;
2872
2873 error = mfi_wait_command(sc, cm);
2874 if (error) {
2875 device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2876 goto out;
2877 }
2878 bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2879 mfi_check_command_post(sc, cm);
2880out:
2881 mfi_release_command(cm);
2882 mtx_unlock(&sc->mfi_io_lock);
2883 mfi_config_unlock(sc, locked);
2884 if (ioc->buf_size > 0)
2885 error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2886 if (ioc_buf)
2887 free(ioc_buf, M_MFIBUF);
2888 return (error);
2889}
2890
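/* Widen a 32-bit user-space pointer value into a kernel pointer. */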
2891#define PTRIN(p) ((void *)(uintptr_t)(p))
2892
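/*
 * Main ioctl dispatcher.  Native and 32-bit compat MegaCLI packets,
 * the Linux management shims, AEN registration and the FreeBSD
 * passthru interface all arrive here.
 */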
2893static int
2894mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2895{
2896 struct mfi_softc *sc;
2897 union mfi_statrequest *ms;
2898 struct mfi_ioc_packet *ioc;
2899#ifdef COMPAT_FREEBSD32
2900 struct mfi_ioc_packet32 *ioc32;
2901#endif
2902 struct mfi_ioc_aen *aen;
2903 struct mfi_command *cm = NULL;
2904 uint32_t context = 0;
2905 union mfi_sense_ptr sense_ptr;
2906 uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2907 size_t len;
2908 int i, res;
2909 struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2910#ifdef COMPAT_FREEBSD32
2911 struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
2912 struct mfi_ioc_passthru iop_swab;
2913#endif
2914 int error, locked;
2916 sc = dev->si_drv1;
2917 error = 0;
2918
2919	if (sc->adpreset)
2920		return (EBUSY);
2921
2922	if (sc->hw_crit_error)
2923		return (EBUSY);
2924
2925	if (sc->issuepend_done == 0)
2926		return (EBUSY);
2927
2928 switch (cmd) {
2929 case MFIIO_STATS:
2930 ms = (union mfi_statrequest *)arg;
2931 switch (ms->ms_item) {
2932 case MFIQ_FREE:
2933 case MFIQ_BIO:
2934 case MFIQ_READY:
2935 case MFIQ_BUSY:
2936 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2937 sizeof(struct mfi_qstat));
2938 break;
2939 default:
2940 error = ENOIOCTL;
2941 break;
2942 }
2943 break;
2944 case MFIIO_QUERY_DISK:
2945 {
2946 struct mfi_query_disk *qd;
2947 struct mfi_disk *ld;
2948
2949 qd = (struct mfi_query_disk *)arg;
2950 mtx_lock(&sc->mfi_io_lock);
2951 TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2952 if (ld->ld_id == qd->array_id)
2953 break;
2954 }
2955 if (ld == NULL) {
2956 qd->present = 0;
2957 mtx_unlock(&sc->mfi_io_lock);
2958 return (0);
2959 }
2960 qd->present = 1;
2961 if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2962 qd->open = 1;
2963 bzero(qd->devname, SPECNAMELEN + 1);
2964 snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2965 mtx_unlock(&sc->mfi_io_lock);
2966 break;
2967 }
2968 case MFI_CMD:
2969#ifdef COMPAT_FREEBSD32
2970 case MFI_CMD32:
2971#endif
2972 {
2973 devclass_t devclass;
2974 ioc = (struct mfi_ioc_packet *)arg;
2975 int adapter;
2976
2977 adapter = ioc->mfi_adapter_no;
2978 if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2979 devclass = devclass_find("mfi");
2980 sc = devclass_get_softc(devclass, adapter);
2981 }
2982 mtx_lock(&sc->mfi_io_lock);
2983 if ((cm = mfi_dequeue_free(sc)) == NULL) {
2984 mtx_unlock(&sc->mfi_io_lock);
2985 return (EBUSY);
2986 }
2987 mtx_unlock(&sc->mfi_io_lock);
2988 locked = 0;
2989
2990 /*
2991 * save off original context since copying from user
2992 * will clobber some data
2993 */
2994 context = cm->cm_frame->header.context;
2995 cm->cm_frame->header.context = cm->cm_index;
2996
2997 bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2998 2 * MEGAMFI_FRAME_SIZE);
2999 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3000 * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3001 cm->cm_frame->header.scsi_status = 0;
3002 cm->cm_frame->header.pad0 = 0;
3003 if (ioc->mfi_sge_count) {
3004 cm->cm_sg =
3005 (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3006 }
3008 cm->cm_flags = 0;
3009 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3010 cm->cm_flags |= MFI_CMD_DATAIN;
3011 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3012 cm->cm_flags |= MFI_CMD_DATAOUT;
3013 /* Legacy app shim */
3014 if (cm->cm_flags == 0)
3015 cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3016 cm->cm_len = cm->cm_frame->header.data_len;
3017 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3018#ifdef COMPAT_FREEBSD32
3019 if (cmd == MFI_CMD) {
3020#endif
3021 /* Native */
3022 cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3023#ifdef COMPAT_FREEBSD32
3024 } else {
3025 /* 32bit on 64bit */
3026 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3027 cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3028 }
3029#endif
3030 cm->cm_len += cm->cm_stp_len;
3031 }
3032 if (cm->cm_len &&
3033 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3034 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3035 M_WAITOK | M_ZERO);
3036 if (cm->cm_data == NULL) {
3037 device_printf(sc->mfi_dev, "Malloc failed\n");
3038 goto out;
3039 }
3040 } else {
3041			cm->cm_data = NULL;
3042 }
3043
3044 /* restore header context */
3045 cm->cm_frame->header.context = context;
3046
3047 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3048 res = mfi_stp_cmd(sc, cm, arg);
3049 if (res != 0)
3050 goto out;
3051 } else {
3052 temp = data;
3053 if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3054 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3055 for (i = 0; i < ioc->mfi_sge_count; i++) {
3056#ifdef COMPAT_FREEBSD32
3057 if (cmd == MFI_CMD) {
3058#endif
3059 /* Native */
3060 addr = ioc->mfi_sgl[i].iov_base;
3061 len = ioc->mfi_sgl[i].iov_len;
3062#ifdef COMPAT_FREEBSD32
3063 } else {
3064 /* 32bit on 64bit */
3065 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3066 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3067 len = ioc32->mfi_sgl[i].iov_len;
3068 }
3069#endif
3070 error = copyin(addr, temp, len);
3071 if (error != 0) {
3072 device_printf(sc->mfi_dev,
3073 "Copy in failed\n");
3074 goto out;
3075 }
3076 temp = &temp[len];
3077 }
3078 }
3079 }
3080
3081 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3082 locked = mfi_config_lock(sc,
3083 cm->cm_frame->dcmd.opcode);
3084
3085 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3086 cm->cm_frame->pass.sense_addr_lo =
3087 (uint32_t)cm->cm_sense_busaddr;
3088 cm->cm_frame->pass.sense_addr_hi =
3089 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3090 }
3091 mtx_lock(&sc->mfi_io_lock);
3092		skip_pre_post = mfi_check_for_sscd(sc, cm);
3093 if (!skip_pre_post) {
3094 error = mfi_check_command_pre(sc, cm);
3095 if (error) {
3096 mtx_unlock(&sc->mfi_io_lock);
3097 goto out;
3098 }
3099 }
3100 if ((error = mfi_wait_command(sc, cm)) != 0) {
3101 device_printf(sc->mfi_dev,
3102			    "Controller polled command failed\n");
3103 mtx_unlock(&sc->mfi_io_lock);
3104 goto out;
3105 }
3106 if (!skip_pre_post) {
3107 mfi_check_command_post(sc, cm);
3108 }
3109 mtx_unlock(&sc->mfi_io_lock);
3110
3111 if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3112 temp = data;
3113 if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3114 (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3115 for (i = 0; i < ioc->mfi_sge_count; i++) {
3116#ifdef COMPAT_FREEBSD32
3117 if (cmd == MFI_CMD) {
3118#endif
3119 /* Native */
3120 addr = ioc->mfi_sgl[i].iov_base;
3121 len = ioc->mfi_sgl[i].iov_len;
3122#ifdef COMPAT_FREEBSD32
3123 } else {
3124 /* 32bit on 64bit */
3125 ioc32 = (struct mfi_ioc_packet32 *)ioc;
3126 addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3127 len = ioc32->mfi_sgl[i].iov_len;
3128 }
3129#endif
3130 error = copyout(temp, addr, len);
3131 if (error != 0) {
3132 device_printf(sc->mfi_dev,
3133 "Copy out failed\n");
3134 goto out;
3135 }
3136 temp = &temp[len];
3137 }
3138 }
3139 }
3140
3141 if (ioc->mfi_sense_len) {
3142 /* get user-space sense ptr then copy out sense */
3143 bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3144 &sense_ptr.sense_ptr_data[0],
3145 sizeof(sense_ptr.sense_ptr_data));
3146#ifdef COMPAT_FREEBSD32
3147 if (cmd != MFI_CMD) {
3148 /*
3149 * not 64bit native so zero out any address
3150 * over 32bit */
3151 sense_ptr.addr.high = 0;
3152 }
3153#endif
3154 error = copyout(cm->cm_sense, sense_ptr.user_space,
3155 ioc->mfi_sense_len);
3156 if (error != 0) {
3157 device_printf(sc->mfi_dev,
3158 "Copy out failed\n");
3159 goto out;
3160 }
3161 }
3162
3163 ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3164out:
3165 mfi_config_unlock(sc, locked);
3166 if (data)
3167 free(data, M_MFIBUF);
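	/* Tear down the per-SGE bounce buffers set up by mfi_stp_cmd(). */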
3168 if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3169 for (i = 0; i < 2; i++) {
3170 if (sc->kbuff_arr[i]) {
3171				if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3172 bus_dmamap_unload(
3173 sc->mfi_kbuff_arr_dmat[i],
3174 sc->mfi_kbuff_arr_dmamap[i]
3175 );
3176 if (sc->kbuff_arr[i] != NULL)
3177 bus_dmamem_free(
3178 sc->mfi_kbuff_arr_dmat[i],
3179 sc->kbuff_arr[i],
3180 sc->mfi_kbuff_arr_dmamap[i]
3181 );
3182 if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3183 bus_dma_tag_destroy(
3184 sc->mfi_kbuff_arr_dmat[i]);
3185 }
3186 }
3187 }
3188 if (cm) {
3189 mtx_lock(&sc->mfi_io_lock);
3190 mfi_release_command(cm);
3191 mtx_unlock(&sc->mfi_io_lock);
3192 }
3193
3194 break;
3195 }
3196 case MFI_SET_AEN:
3197 aen = (struct mfi_ioc_aen *)arg;
3198 error = mfi_aen_register(sc, aen->aen_seq_num,
3199 aen->aen_class_locale);
3200
3201 break;
3202 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3203 {
3204 devclass_t devclass;
3205 struct mfi_linux_ioc_packet l_ioc;
3206 int adapter;
3207
3208 devclass = devclass_find("mfi");
3209 if (devclass == NULL)
3210 return (ENOENT);
3211
3212 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3213 if (error)
3214 return (error);
3215 adapter = l_ioc.lioc_adapter_no;
3216 sc = devclass_get_softc(devclass, adapter);
3217 if (sc == NULL)
3218 return (ENOENT);
3219 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3220 cmd, arg, flag, td));
3221 break;
3222 }
3223 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3224 {
3225 devclass_t devclass;
3226 struct mfi_linux_ioc_aen l_aen;
3227 int adapter;
3228
3229 devclass = devclass_find("mfi");
3230 if (devclass == NULL)
3231 return (ENOENT);
3232
3233 error = copyin(arg, &l_aen, sizeof(l_aen));
3234 if (error)
3235 return (error);
3236 adapter = l_aen.laen_adapter_no;
3237 sc = devclass_get_softc(devclass, adapter);
3238 if (sc == NULL)
3239 return (ENOENT);
3240 return (mfi_linux_ioctl_int(sc->mfi_cdev,
3241 cmd, arg, flag, td));
3242 break;
3243 }
3244#ifdef COMPAT_FREEBSD32
3245 case MFIIO_PASSTHRU32:
3246 iop_swab.ioc_frame = iop32->ioc_frame;
3247 iop_swab.buf_size = iop32->buf_size;
3248 iop_swab.buf = PTRIN(iop32->buf);
3249 iop = &iop_swab;
3250 /* FALLTHROUGH */
3251#endif
3252 case MFIIO_PASSTHRU:
3253 error = mfi_user_command(sc, iop);
3254#ifdef COMPAT_FREEBSD32
3255 if (cmd == MFIIO_PASSTHRU32)
3256 iop32->ioc_frame = iop_swab.ioc_frame;
3257#endif
3258 break;
3259 default:
3260 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3261 error = ENOENT;
3262 break;
3263 }
3264
3265 return (error);
3266}
3267
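/*
 * Backend for the Linux ioctl shims.  Mirrors the native MFI_CMD path
 * but operates on the packed Linux structures; the Linux management
 * tools are 32-bit only, so sense addresses are clamped to 32 bits on
 * amd64.
 */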
3268static int
3269mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3270{
3271 struct mfi_softc *sc;
3272 struct mfi_linux_ioc_packet l_ioc;
3273 struct mfi_linux_ioc_aen l_aen;
3274 struct mfi_command *cm = NULL;
3275 struct mfi_aen *mfi_aen_entry;
3276 union mfi_sense_ptr sense_ptr;
3277 uint32_t context = 0;
3278 uint8_t *data = NULL, *temp;
3279 int i;
3280 int error, locked;
3281
3282 sc = dev->si_drv1;
3283 error = 0;
3284 switch (cmd) {
3285 case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3286 error = copyin(arg, &l_ioc, sizeof(l_ioc));
3287 if (error != 0)
3288 return (error);
3289
3290 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3291 return (EINVAL);
3292 }
3293
3294 mtx_lock(&sc->mfi_io_lock);
3295 if ((cm = mfi_dequeue_free(sc)) == NULL) {
3296 mtx_unlock(&sc->mfi_io_lock);
3297 return (EBUSY);
3298 }
3299 mtx_unlock(&sc->mfi_io_lock);
3300 locked = 0;
3301
3302 /*
3303 * save off original context since copying from user
3304 * will clobber some data
3305 */
3306 context = cm->cm_frame->header.context;
3307
3308 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3309 2 * MFI_DCMD_FRAME_SIZE); /* this isn't quite right */
3310 cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3311 * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3312 cm->cm_frame->header.scsi_status = 0;
3313 cm->cm_frame->header.pad0 = 0;
3314 if (l_ioc.lioc_sge_count)
3315 cm->cm_sg =
3316 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3317 cm->cm_flags = 0;
3318 if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3319 cm->cm_flags |= MFI_CMD_DATAIN;
3320 if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3321 cm->cm_flags |= MFI_CMD_DATAOUT;
3322 cm->cm_len = cm->cm_frame->header.data_len;
3323 if (cm->cm_len &&
3324 (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3325 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3326 M_WAITOK | M_ZERO);
3327 if (cm->cm_data == NULL) {
3328 device_printf(sc->mfi_dev, "Malloc failed\n");
3329 goto out;
3330 }
3331 } else {
3332			cm->cm_data = NULL;
3333 }
3334
3335 /* restore header context */
3336 cm->cm_frame->header.context = context;
3337
3338 temp = data;
3339 if (cm->cm_flags & MFI_CMD_DATAOUT) {
3340 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3341 error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3342 temp,
3343 l_ioc.lioc_sgl[i].iov_len);
3344 if (error != 0) {
3345 device_printf(sc->mfi_dev,
3346 "Copy in failed\n");
3347 goto out;
3348 }
3349 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3350 }
3351 }
3352
3353 if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3354 locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3355
3356 if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3357 cm->cm_frame->pass.sense_addr_lo =
3358 (uint32_t)cm->cm_sense_busaddr;
3359 cm->cm_frame->pass.sense_addr_hi =
3360 (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3361 }
3362
3363 mtx_lock(&sc->mfi_io_lock);
3364 error = mfi_check_command_pre(sc, cm);
3365 if (error) {
3366 mtx_unlock(&sc->mfi_io_lock);
3367 goto out;
3368 }
3369
3370 if ((error = mfi_wait_command(sc, cm)) != 0) {
3371 device_printf(sc->mfi_dev,
3372			    "Controller polled command failed\n");
3373 mtx_unlock(&sc->mfi_io_lock);
3374 goto out;
3375 }
3376
3377 mfi_check_command_post(sc, cm);
3378 mtx_unlock(&sc->mfi_io_lock);
3379
3380 temp = data;
3381 if (cm->cm_flags & MFI_CMD_DATAIN) {
3382 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3383 error = copyout(temp,
3384 PTRIN(l_ioc.lioc_sgl[i].iov_base),
3385 l_ioc.lioc_sgl[i].iov_len);
3386 if (error != 0) {
3387 device_printf(sc->mfi_dev,
3388 "Copy out failed\n");
3389 goto out;
3390 }
3391 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3392 }
3393 }
3394
3395 if (l_ioc.lioc_sense_len) {
3396 /* get user-space sense ptr then copy out sense */
3397 bcopy(&((struct mfi_linux_ioc_packet*)arg)
3398 ->lioc_frame.raw[l_ioc.lioc_sense_off],
3399 &sense_ptr.sense_ptr_data[0],
3400 sizeof(sense_ptr.sense_ptr_data));
3401#ifdef __amd64__
3402 /*
3403 * only 32bit Linux support so zero out any
3404 * address over 32bit
3405 */
3406 sense_ptr.addr.high = 0;
3407#endif
3408 error = copyout(cm->cm_sense, sense_ptr.user_space,
3409 l_ioc.lioc_sense_len);
3410 if (error != 0) {
3411 device_printf(sc->mfi_dev,
3412 "Copy out failed\n");
3413 goto out;
3414 }
3415 }
3416
3417 error = copyout(&cm->cm_frame->header.cmd_status,
3418 &((struct mfi_linux_ioc_packet*)arg)
3419 ->lioc_frame.hdr.cmd_status,
3420 1);
3421 if (error != 0) {
3422 device_printf(sc->mfi_dev,
3423 "Copy out failed\n");
3424 goto out;
3425 }
3426
3427out:
3428 mfi_config_unlock(sc, locked);
3429 if (data)
3430 free(data, M_MFIBUF);
3431 if (cm) {
3432 mtx_lock(&sc->mfi_io_lock);
3433 mfi_release_command(cm);
3434 mtx_unlock(&sc->mfi_io_lock);
3435 }
3436
3437 return (error);
3438 case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3439 error = copyin(arg, &l_aen, sizeof(l_aen));
3440 if (error != 0)
3441 return (error);
3442 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3443 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3444 M_WAITOK);
3445 mtx_lock(&sc->mfi_io_lock);
3446 if (mfi_aen_entry != NULL) {
3447 mfi_aen_entry->p = curproc;
3448 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3449 aen_link);
3450 }
3451 error = mfi_aen_register(sc, l_aen.laen_seq_num,
3452 l_aen.laen_class_locale);
3453
3454 if (error != 0) {
3455 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3456 aen_link);
3457 free(mfi_aen_entry, M_MFIBUF);
3458 }
3459 mtx_unlock(&sc->mfi_io_lock);
3460
3461 return (error);
3462 default:
3463 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3464 error = ENOENT;
3465 break;
3466 }
3467
3468 return (error);
3469}
3470
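/*
 * poll(2) entry point: the device becomes readable once an AEN has
 * fired, and POLLERR is reported when no AEN command is outstanding
 * that could ever fire one.
 */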
3471static int
3472mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3473{
3474 struct mfi_softc *sc;
3475 int revents = 0;
3476
3477 sc = dev->si_drv1;
3478
3479 if (poll_events & (POLLIN | POLLRDNORM)) {
3480 if (sc->mfi_aen_triggered != 0) {
3481 revents |= poll_events & (POLLIN | POLLRDNORM);
3482 sc->mfi_aen_triggered = 0;
3483 }
3484 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3485 revents |= POLLERR;
3486 }
3487 }
3488
3489 if (revents == 0) {
3490 if (poll_events & (POLLIN | POLLRDNORM)) {
3491 sc->mfi_poll_waiting = 1;
3492 selrecord(td, &sc->mfi_select);
3493 }
3494 }
3495
3496	return (revents);
3497}
3498
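/*
 * Debugging aid: walk every mfi instance and report busy commands that
 * have been outstanding for longer than MFI_CMD_TIMEOUT seconds.
 */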
3499static void
3500mfi_dump_all(void)
3501{
3502 struct mfi_softc *sc;
3503 struct mfi_command *cm;
3504 devclass_t dc;
3505 time_t deadline;
3506 int timedout;
3507 int i;
3508
3509 dc = devclass_find("mfi");
3510 if (dc == NULL) {
3511 printf("No mfi dev class\n");
3512 return;
3513 }
3514
3515 for (i = 0; ; i++) {
3516 sc = devclass_get_softc(dc, i);
3517 if (sc == NULL)
3518 break;
3519 device_printf(sc->mfi_dev, "Dumping\n\n");
3520 timedout = 0;
3521 deadline = time_uptime - MFI_CMD_TIMEOUT;
3522 mtx_lock(&sc->mfi_io_lock);
3523 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3524 if (cm->cm_timestamp < deadline) {
3525 device_printf(sc->mfi_dev,
3526 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3527 cm, (int)(time_uptime - cm->cm_timestamp));
3528 MFI_PRINT_CMD(cm);
3529 timedout++;
3530 }
3531 }
3532
3533#if 0
3534 if (timedout)
3535 MFI_DUMP_CMDS(SC);
3536#endif
3537
3538 mtx_unlock(&sc->mfi_io_lock);
3539 }
3540
3541 return;
3542}
3543
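/*
 * Watchdog callout, rearmed every MFI_CMD_TIMEOUT seconds.  The
 * Thunderbolt reset handler gets first crack; otherwise every busy
 * command older than the deadline is reported, except the long-lived
 * AEN and map-sync commands, which stay queued by design.
 */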
3544static void
3545mfi_timeout(void *data)
3546{
3547 struct mfi_softc *sc = (struct mfi_softc *)data;
3548 struct mfi_command *cm;
3549 time_t deadline;
3550 int timedout = 0;
3551
3552 deadline = time_uptime - MFI_CMD_TIMEOUT;
3553 if (sc->adpreset == 0) {
3554 if (!mfi_tbolt_reset(sc)) {
3555			callout_reset(&sc->mfi_watchdog_callout,
			    MFI_CMD_TIMEOUT * hz, mfi_timeout, sc);
3556 return;
3557 }
3558 }
3559 mtx_lock(&sc->mfi_io_lock);
3560 TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3561 if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3562 continue;
3563 if (cm->cm_timestamp < deadline) {
3564 if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3565 cm->cm_timestamp = time_uptime;
3566 } else {
3567 device_printf(sc->mfi_dev,
3568 "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3569 cm, (int)(time_uptime - cm->cm_timestamp)
3570 );
3571 MFI_PRINT_CMD(cm);
3572 MFI_VALIDATE_CMD(sc, cm);
3573 timedout++;
3574 }
3575 }
3576 }
3577
3578#if 0
3579 if (timedout)
3580 MFI_DUMP_CMDS(SC);
3581#endif
3582
3583 mtx_unlock(&sc->mfi_io_lock);
3584
3585 callout_reset(&sc->mfi_watchdog_callout, MFI_CMD_TIMEOUT * hz,
3586 mfi_timeout, sc);
3587
3588 if (0)
3589 mfi_dump_all();
3590 return;
3591}