head/sys/dev/mfi/mfi.c: diff between revision 162118 (deleted lines) and revision 162458 (added lines)
1/*-
2 * Copyright (c) 2006 IronPort Systems
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 162118 2006-09-07 18:40:49Z ambrisko $");
28__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi.c 162458 2006-09-20 06:58:02Z scottl $");
29
30#include "opt_mfi.h"
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/sysctl.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/poll.h>
38#include <sys/selinfo.h>
39#include <sys/bus.h>
40#include <sys/conf.h>
41#include <sys/eventhandler.h>
42#include <sys/rman.h>
43#include <sys/bus_dma.h>
44#include <sys/bio.h>
45#include <sys/ioccom.h>
46#include <sys/uio.h>
47#include <sys/proc.h>
48
49#include <machine/bus.h>
50#include <machine/resource.h>
51
52#include <dev/mfi/mfireg.h>
53#include <dev/mfi/mfi_ioctl.h>
54#include <dev/mfi/mfivar.h>
55
56static int mfi_alloc_commands(struct mfi_softc *);
57static void mfi_release_command(struct mfi_command *cm);
58static int mfi_comms_init(struct mfi_softc *);
59static int mfi_polled_command(struct mfi_softc *, struct mfi_command *);
60static int mfi_wait_command(struct mfi_softc *, struct mfi_command *);
61static int mfi_get_controller_info(struct mfi_softc *);
62static int mfi_get_log_state(struct mfi_softc *,
63 struct mfi_evt_log_state **);
64static int mfi_get_entry(struct mfi_softc *, int);
65static int mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
66 uint32_t, void **, size_t);
67static void mfi_data_cb(void *, bus_dma_segment_t *, int, int);
68static void mfi_startup(void *arg);
69static void mfi_intr(void *arg);
70static void mfi_enable_intr(struct mfi_softc *sc);
71static void mfi_ldprobe(struct mfi_softc *sc);
72static int mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
73static void mfi_aen_complete(struct mfi_command *);
74static int mfi_aen_setup(struct mfi_softc *, uint32_t);
75static int mfi_add_ld(struct mfi_softc *sc, int);
76static void mfi_add_ld_complete(struct mfi_command *);
77static struct mfi_command * mfi_bio_command(struct mfi_softc *);
78static void mfi_bio_complete(struct mfi_command *);
79static int mfi_mapcmd(struct mfi_softc *, struct mfi_command *);
80static int mfi_send_frame(struct mfi_softc *, struct mfi_command *);
81static void mfi_complete(struct mfi_softc *, struct mfi_command *);
82static int mfi_abort(struct mfi_softc *, struct mfi_command *);
83static int mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, d_thread_t *);
84
85
86SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
87static int mfi_event_locale = MFI_EVT_LOCALE_ALL;
88SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
89 0, "event message locale");
90static int mfi_event_class = MFI_EVT_CLASS_DEBUG;
91SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
92 0, "event message class");
93
94/* Management interface */
95static d_open_t mfi_open;
96static d_close_t mfi_close;
97static d_ioctl_t mfi_ioctl;
98static d_poll_t mfi_poll;
99
100static struct cdevsw mfi_cdevsw = {
101 .d_version = D_VERSION,
102 .d_flags = 0,
103 .d_open = mfi_open,
104 .d_close = mfi_close,
105 .d_ioctl = mfi_ioctl,
106 .d_poll = mfi_poll,
107 .d_name = "mfi",
108};
109
110MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
111
112#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
113
114static int
115mfi_transition_firmware(struct mfi_softc *sc)
116{
117 int32_t fw_state, cur_state;
118 int max_wait, i;
119
120 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
121 while (fw_state != MFI_FWSTATE_READY) {
122 if (bootverbose)
123 device_printf(sc->mfi_dev, "Waiting for firmware to "
124 "become ready\n");
125 cur_state = fw_state;
126 switch (fw_state) {
127 case MFI_FWSTATE_FAULT:
128 device_printf(sc->mfi_dev, "Firmware fault\n");
129 return (ENXIO);
130 case MFI_FWSTATE_WAIT_HANDSHAKE:
131 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
132 max_wait = 2;
133 break;
134 case MFI_FWSTATE_OPERATIONAL:
135 MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
136 max_wait = 10;
137 break;
138 case MFI_FWSTATE_UNDEFINED:
139 case MFI_FWSTATE_BB_INIT:
140 max_wait = 2;
141 break;
142 case MFI_FWSTATE_FW_INIT:
143 case MFI_FWSTATE_DEVICE_SCAN:
144 case MFI_FWSTATE_FLUSH_CACHE:
145 max_wait = 20;
146 break;
147 default:
148 device_printf(sc->mfi_dev,"Unknown firmware state %d\n",
149 fw_state);
150 return (ENXIO);
151 }
152 for (i = 0; i < (max_wait * 10); i++) {
153 fw_state = MFI_READ4(sc, MFI_OMSG0) & MFI_FWSTATE_MASK;
154 if (fw_state == cur_state)
155 DELAY(100000);
156 else
157 break;
158 }
159 if (fw_state == cur_state) {
160 device_printf(sc->mfi_dev, "firmware stuck in state "
161 "%#x\n", fw_state);
162 return (ENXIO);
163 }
164 }
165 return (0);
166}
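/*
 * A minimal stand-alone sketch, not part of either revision, of the polling
 * budget used by mfi_transition_firmware() above: each pass of the inner
 * loop calls DELAY(100000), i.e. busy-waits for 100 ms, and the loop runs
 * max_wait * 10 times, so max_wait is effectively a timeout in seconds.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned poll_us = 100000;	/* DELAY(100000) per poll */
	const unsigned max_wait = 10;		/* e.g. the MFI_FWSTATE_OPERATIONAL case */
	unsigned total_ms;

	total_ms = max_wait * 10 * (poll_us / 1000);
	printf("worst-case wait: %u ms\n", total_ms);	/* prints 10000 */
	return (0);
}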
167
168static void
169mfi_addr32_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
170{
171 uint32_t *addr;
172
173 addr = arg;
174 *addr = segs[0].ds_addr;
175}
176
177int
178mfi_attach(struct mfi_softc *sc)
179{
180 uint32_t status;
181 int error, commsz, framessz, sensesz;
182 int frames, unit;
182 int frames, unit, max_fw_sge;
183
184 mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
185 TAILQ_INIT(&sc->mfi_ld_tqh);
186 TAILQ_INIT(&sc->mfi_aen_pids);
187
188 mfi_initq_free(sc);
189 mfi_initq_ready(sc);
190 mfi_initq_busy(sc);
191 mfi_initq_bio(sc);
192
193 /* Before we get too far, see if the firmware is working */
194 if ((error = mfi_transition_firmware(sc)) != 0) {
195 device_printf(sc->mfi_dev, "Firmware not in READY state, "
196 "error %d\n", error);
197 return (ENXIO);
198 }
199
200 /*
201 * Get information needed for sizing the contiguous memory for the
202 * frame pool. Size down the sgl parameter since we know that
203 * we will never need more than what's required for MAXPHYS.
204 * It would be nice if these constants were available at runtime
205 * instead of compile time.
206 */
207 status = MFI_READ4(sc, MFI_OMSG0);
208 sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
209 sc->mfi_max_fw_sgl = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
210 sc->mfi_total_sgl = min(sc->mfi_max_fw_sgl, ((MAXPHYS / PAGE_SIZE) +1));
209 max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
210 sc->mfi_max_sge = min(max_fw_sge, ((MAXPHYS / PAGE_SIZE) + 1));
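/*
 * A stand-alone sketch, not taken from this file, of why the new code above
 * caps mfi_max_sge at MAXPHYS / PAGE_SIZE + 1: a MAXPHYS-byte transfer that
 * does not start on a page boundary touches one extra page and therefore
 * needs one extra scatter/gather entry.  The MAXPHYS and PAGE_SIZE values
 * below are assumptions (128 KB and 4 KB), not read from this driver.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned maxphys = 128 * 1024;	/* assumed MAXPHYS */
	const unsigned page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned max_sge;

	max_sge = maxphys / page_size + 1;
	printf("S/G entries for one MAXPHYS I/O: %u\n", max_sge);	/* prints 33 */
	return (0);
}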
211
212 /*
213 * Create the dma tag for data buffers. Used both for block I/O
214 * and for various internal data queries.
215 */
216 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
217 1, 0, /* algnmnt, boundary */
218 BUS_SPACE_MAXADDR, /* lowaddr */
219 BUS_SPACE_MAXADDR, /* highaddr */
220 NULL, NULL, /* filter, filterarg */
221 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
222 sc->mfi_total_sgl, /* nsegments */
222 sc->mfi_max_sge, /* nsegments */
223 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
224 BUS_DMA_ALLOCNOW, /* flags */
225 busdma_lock_mutex, /* lockfunc */
226 &sc->mfi_io_lock, /* lockfuncarg */
227 &sc->mfi_buffer_dmat)) {
228 device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
229 return (ENOMEM);
230 }
231
232 /*
233 * Allocate DMA memory for the comms queues. Keep it under 4GB for
234 * efficiency. The mfi_hwcomms struct includes space for 1 reply queue
235 * entry, so the calculated size here will be 1 more than
236 * mfi_max_fw_cmds. This is apparently a requirement of the hardware.
237 */
238 commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
239 sizeof(struct mfi_hwcomms);
240 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
241 1, 0, /* algnmnt, boundary */
242 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
243 BUS_SPACE_MAXADDR, /* highaddr */
244 NULL, NULL, /* filter, filterarg */
245 commsz, /* maxsize */
246 1, /* msegments */
247 commsz, /* maxsegsize */
248 0, /* flags */
249 NULL, NULL, /* lockfunc, lockarg */
250 &sc->mfi_comms_dmat)) {
251 device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
252 return (ENOMEM);
253 }
254 if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
255 BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
256 device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
257 return (ENOMEM);
258 }
259 bzero(sc->mfi_comms, commsz);
260 bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
261 sc->mfi_comms, commsz, mfi_addr32_cb, &sc->mfi_comms_busaddr, 0);
262
263 /*
264 * Allocate DMA memory for the command frames. Keep them in the
265 * lower 4GB for efficiency. Calculate the size of the frames at
266 * the same time; the frame is 64 bytes plus space for the SG lists.
265 * lower 4GB for efficiency. Calculate the size of the commands at
266 * the same time; each command is one 64 byte frame plus a set of
267 * additional frames for holding sg lists or other data.
267 * The assumption here is that the SG list will start at the second
268 * The assumption here is that the SG list will start at the second
268 * 64 byte segment of the frame and not use the unused bytes in the
269 * frame. While this might seem wasteful, apparently the frames must
270 * be 64 byte aligned, so any savings would be negated by the extra
271 * alignment padding.
269 * frame and not use the unused bytes in the first frame. While this
270 * isn't technically correct, it simplifies the calculation and allows
271 * for command frames that might be larger than an mfi_io_frame.
272 */
273 if (sizeof(bus_addr_t) == 8) {
272 */
273 if (sizeof(bus_addr_t) == 8) {
274 sc->mfi_sgsize = sizeof(struct mfi_sg64);
274 sc->mfi_sge_size = sizeof(struct mfi_sg64);
275 sc->mfi_flags |= MFI_FLAGS_SG64;
276 } else {
275 sc->mfi_flags |= MFI_FLAGS_SG64;
276 } else {
277 sc->mfi_sgsize = sizeof(struct mfi_sg32);
277 sc->mfi_sge_size = sizeof(struct mfi_sg32);
278 }
278 }
279 frames = (sc->mfi_sgsize * sc->mfi_total_sgl + MFI_FRAME_SIZE - 1) /
280 MFI_FRAME_SIZE + 1;
281 sc->mfi_frame_size = frames * MFI_FRAME_SIZE;
282 framessz = sc->mfi_frame_size * sc->mfi_max_fw_cmds;
279 frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
280 sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
281 framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
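/*
 * A hedged worked example of the new per-command sizing above (r162458
 * numbering): each command gets one 64-byte frame plus however many extra
 * frames its scatter/gather list needs.  The 64-byte MFI_FRAME_SIZE comes
 * from the surrounding comments; the 12-byte sizeof(struct mfi_sg64) and
 * the 33-entry mfi_max_sge are assumptions, not read from this diff.
 */
#include <stdio.h>

int
main(void)
{
	const unsigned frame_size = 64;	/* MFI_FRAME_SIZE, per the comments */
	const unsigned sge_size = 12;	/* assumed sizeof(struct mfi_sg64) */
	const unsigned max_sge = 33;	/* assumed MAXPHYS / PAGE_SIZE + 1 */
	unsigned frames, cmd_size;

	frames = (sge_size * max_sge - 1) / frame_size + 2;
	cmd_size = frames * frame_size;
	printf("frames per command: %u, mfi_cmd_size: %u bytes\n",
	    frames, cmd_size);		/* prints 8 and 512 */
	return (0);
}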
283 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
284 64, 0, /* algnmnt, boundary */
285 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
286 BUS_SPACE_MAXADDR, /* highaddr */
287 NULL, NULL, /* filter, filterarg */
288 framessz, /* maxsize */
289 1, /* nsegments */
290 framessz, /* maxsegsize */
291 0, /* flags */
292 NULL, NULL, /* lockfunc, lockarg */
293 &sc->mfi_frames_dmat)) {
294 device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
295 return (ENOMEM);
296 }
297 if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
298 BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
299 device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
300 return (ENOMEM);
301 }
302 bzero(sc->mfi_frames, framessz);
303 bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
304 sc->mfi_frames, framessz, mfi_addr32_cb, &sc->mfi_frames_busaddr,0);
305
306 /*
307 * Allocate DMA memory for the frame sense data. Keep them in the
308 * lower 4GB for efficiency
309 */
310 sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
311 if (bus_dma_tag_create( sc->mfi_parent_dmat, /* parent */
312 4, 0, /* algnmnt, boundary */
313 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
314 BUS_SPACE_MAXADDR, /* highaddr */
315 NULL, NULL, /* filter, filterarg */
316 sensesz, /* maxsize */
317 1, /* nsegments */
318 sensesz, /* maxsegsize */
319 0, /* flags */
320 NULL, NULL, /* lockfunc, lockarg */
321 &sc->mfi_sense_dmat)) {
322 device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
323 return (ENOMEM);
324 }
325 if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
326 BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
327 device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
328 return (ENOMEM);
329 }
330 bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
331 sc->mfi_sense, sensesz, mfi_addr32_cb, &sc->mfi_sense_busaddr, 0);
332
333 if ((error = mfi_alloc_commands(sc)) != 0)
334 return (error);
335
336 if ((error = mfi_comms_init(sc)) != 0)
337 return (error);
338
339 if ((error = mfi_get_controller_info(sc)) != 0)
340 return (error);
341
342 if ((error = mfi_aen_setup(sc, 0), 0) != 0)
343 return (error);
344
345 /*
346 * Set up the interrupt handler. XXX This should happen in
347 * mfi_pci.c
348 */
349 sc->mfi_irq_rid = 0;
350 if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ,
351 &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
352 device_printf(sc->mfi_dev, "Cannot allocate interrupt\n");
353 return (EINVAL);
354 }
355 if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE|INTR_TYPE_BIO,
356 mfi_intr, sc, &sc->mfi_intr)) {
357 device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
358 return (EINVAL);
359 }
360
361 /* Register a config hook to probe the bus for arrays */
362 sc->mfi_ich.ich_func = mfi_startup;
363 sc->mfi_ich.ich_arg = sc;
364 if (config_intrhook_establish(&sc->mfi_ich) != 0) {
365 device_printf(sc->mfi_dev, "Cannot establish configuration "
366 "hook\n");
367 return (EINVAL);
368 }
369
370 /*
371 * Register a shutdown handler.
372 */
373 if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
374 sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
375 device_printf(sc->mfi_dev, "Warning: shutdown event "
376 "registration failed\n");
377 }
378
379 /*
380 * Create the control device for doing management
381 */
382 unit = device_get_unit(sc->mfi_dev);
383 sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
384 0640, "mfi%d", unit);
385 if (unit == 0)
386 make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
387 if (sc->mfi_cdev != NULL)
388 sc->mfi_cdev->si_drv1 = sc;
389
390 return (0);
391}
392
393static int
394mfi_alloc_commands(struct mfi_softc *sc)
395{
396 struct mfi_command *cm;
397 int i, ncmds;
398
399 /*
400 * XXX Should we allocate all the commands up front, or allocate on
401 * demand later like 'aac' does?
402 */
403 ncmds = sc->mfi_max_fw_cmds;
404 sc->mfi_commands = malloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
405 M_WAITOK | M_ZERO);
406
407 for (i = 0; i < ncmds; i++) {
408 cm = &sc->mfi_commands[i];
409 cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
410 sc->mfi_frame_size * i);
409 sc->mfi_cmd_size * i);
411 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
410 cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
412 sc->mfi_frame_size * i;
411 sc->mfi_cmd_size * i;
413 cm->cm_frame->header.context = i;
414 cm->cm_sense = &sc->mfi_sense[i];
415 cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
416 cm->cm_sc = sc;
417 if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
418 &cm->cm_dmamap) == 0)
419 mfi_release_command(cm);
420 else
421 break;
422 sc->mfi_total_cmds++;
423 }
424
425 return (0);
426}
427
428static void
429mfi_release_command(struct mfi_command *cm)
430{
431 uint32_t *hdr_data;
432
433 /*
434 * Zero out the important fields of the frame, but make sure the
435 * context field is preserved
436 */
437 hdr_data = (uint32_t *)cm->cm_frame;
438 hdr_data[0] = 0;
439 hdr_data[1] = 0;
440
441 cm->cm_extra_frames = 0;
442 cm->cm_flags = 0;
443 cm->cm_complete = NULL;
444 cm->cm_private = NULL;
445 cm->cm_sg = 0;
446 cm->cm_total_frame_size = 0;
447 mfi_enqueue_free(cm);
448}
449
450static int
451mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp, uint32_t opcode,
452 void **bufp, size_t bufsize)
453{
454 struct mfi_command *cm;
455 struct mfi_dcmd_frame *dcmd;
456 void *buf = NULL;
457
458 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
459
460 cm = mfi_dequeue_free(sc);
461 if (cm == NULL)
462 return (EBUSY);
463
464 if ((bufsize > 0) && (bufp != NULL)) {
465 if (*bufp == NULL) {
466 buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
467 if (buf == NULL) {
468 mfi_release_command(cm);
469 return (ENOMEM);
470 }
471 *bufp = buf;
472 } else {
473 buf = *bufp;
474 }
475 }
476
477 dcmd = &cm->cm_frame->dcmd;
478 bzero(dcmd->mbox, MFI_MBOX_SIZE);
479 dcmd->header.cmd = MFI_CMD_DCMD;
480 dcmd->header.timeout = 0;
481 dcmd->header.flags = 0;
482 dcmd->header.data_len = bufsize;
483 dcmd->opcode = opcode;
484 cm->cm_sg = &dcmd->sgl;
485 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
486 cm->cm_flags = 0;
487 cm->cm_data = buf;
488 cm->cm_private = buf;
489 cm->cm_len = bufsize;
490
491 *cmp = cm;
492 if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
493 *bufp = buf;
494 return (0);
495}
496
497static int
498mfi_comms_init(struct mfi_softc *sc)
499{
500 struct mfi_command *cm;
501 struct mfi_init_frame *init;
502 struct mfi_init_qinfo *qinfo;
503 int error;
504
505 if ((cm = mfi_dequeue_free(sc)) == NULL)
506 return (EBUSY);
507
508 /*
509 * Abuse the SG list area of the frame to hold the init_qinfo
510 * object;
511 */
512 init = &cm->cm_frame->init;
513 qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
514
515 bzero(qinfo, sizeof(struct mfi_init_qinfo));
516 qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
517 qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
518 offsetof(struct mfi_hwcomms, hw_reply_q);
519 qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
520 offsetof(struct mfi_hwcomms, hw_pi);
521 qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
522 offsetof(struct mfi_hwcomms, hw_ci);
523
524 init->header.cmd = MFI_CMD_INIT;
525 init->header.data_len = sizeof(struct mfi_init_qinfo);
526 init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
527
528 if ((error = mfi_polled_command(sc, cm)) != 0) {
529 device_printf(sc->mfi_dev, "failed to send init command\n");
530 return (error);
531 }
532 mfi_release_command(cm);
533
534 return (0);
535}
536
537static int
538mfi_get_controller_info(struct mfi_softc *sc)
539{
540 struct mfi_command *cm = NULL;
541 struct mfi_ctrl_info *ci = NULL;
542 uint32_t max_sectors_1, max_sectors_2;
543 int error;
544
545 mtx_lock(&sc->mfi_io_lock);
546 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
547 (void **)&ci, sizeof(*ci));
548 if (error)
549 goto out;
550 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
551
552 if ((error = mfi_mapcmd(sc, cm)) != 0) {
553 device_printf(sc->mfi_dev, "Controller info buffer map failed\n");
554 free(ci, M_MFIBUF);
555 mfi_release_command(cm);
556 return (error);
557 }
558
559 /* It's ok if this fails, just use default info instead */
560 if ((error = mfi_polled_command(sc, cm)) != 0) {
561 device_printf(sc->mfi_dev, "Failed to get controller info\n");
562 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
561 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
563 MFI_SECTOR_LEN;
564 error = 0;
565 goto out;
566 }
567
568 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
569 BUS_DMASYNC_POSTREAD);
570 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
571
572 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
573 max_sectors_2 = ci->max_request_size;
574 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
575
576out:
577 if (ci)
578 free(ci, M_MFIBUF);
579 if (cm)
580 mfi_release_command(cm);
581 mtx_unlock(&sc->mfi_io_lock);
582 return (error);
583}
584
585static int
586mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
587{
588 struct mfi_command *cm = NULL;
589 int error;
590
591 mtx_lock(&sc->mfi_io_lock);
592 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
593 (void **)log_state, sizeof(**log_state));
594 if (error)
595 goto out;
596 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
597
598 if ((error = mfi_mapcmd(sc, cm)) != 0) {
599 device_printf(sc->mfi_dev, "Log state buffer map failed\n");
600 goto out;
601 }
602
603 if ((error = mfi_polled_command(sc, cm)) != 0) {
604 device_printf(sc->mfi_dev, "Failed to get log state\n");
605 goto out;
606 }
607
608 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
609 BUS_DMASYNC_POSTREAD);
610 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
611
612out:
613 if (cm)
614 mfi_release_command(cm);
615 mtx_unlock(&sc->mfi_io_lock);
616
617 return (error);
618}
619
620static int
621mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
622{
623 struct mfi_evt_log_state *log_state = NULL;
624 union mfi_evt class_locale;
625 int error = 0;
626 uint32_t seq;
627
628 class_locale.members.reserved = 0;
629 class_locale.members.locale = mfi_event_locale;
630 class_locale.members.class = mfi_event_class;
631
632 if (seq_start == 0) {
633 error = mfi_get_log_state(sc, &log_state);
634 if (error) {
635 if (log_state)
636 free(log_state, M_MFIBUF);
637 return (error);
638 }
639 /*
640 * Don't run them yet since we can't parse them.
641 * We can indirectly get the contents from
642 * the AEN mechanism via setting it lower than
643 * current. The firmware will iterate through them.
644 */
645 for (seq = log_state->shutdown_seq_num;
646 seq <= log_state->newest_seq_num; seq++) {
647 mfi_get_entry(sc, seq);
648 }
649 } else
650 seq = seq_start;
651 mfi_aen_register(sc, seq, class_locale.word);
652 free(log_state, M_MFIBUF);
653
654 return 0;
655}
656
657static int
658mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
659{
660 struct mfi_frame_header *hdr;
661 int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
662
663 hdr = &cm->cm_frame->header;
664 hdr->cmd_status = 0xff;
665 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
666
667 mfi_send_frame(sc, cm);
668
669 while (hdr->cmd_status == 0xff) {
670 DELAY(1000);
671 tm -= 1000;
672 if (tm <= 0)
673 break;
674 }
675
676 if (hdr->cmd_status == 0xff) {
677 device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
678 return (ETIMEDOUT);
679 }
680
681 return (0);
682}
683
684static int
685mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
686{
687
688 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
689 cm->cm_complete = NULL;
690
691 mfi_enqueue_ready(cm);
692 mfi_startio(sc);
693 return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
694}
695
696void
697mfi_free(struct mfi_softc *sc)
698{
699 struct mfi_command *cm;
700 int i;
701
702 if (sc->mfi_cdev != NULL)
703 destroy_dev(sc->mfi_cdev);
704
705 if (sc->mfi_total_cmds != 0) {
706 for (i = 0; i < sc->mfi_total_cmds; i++) {
707 cm = &sc->mfi_commands[i];
708 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
709 }
710 free(sc->mfi_commands, M_MFIBUF);
711 }
712
713 if (sc->mfi_intr)
714 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
715 if (sc->mfi_irq != NULL)
716 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
717 sc->mfi_irq);
718
719 if (sc->mfi_sense_busaddr != 0)
720 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
721 if (sc->mfi_sense != NULL)
722 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
723 sc->mfi_sense_dmamap);
724 if (sc->mfi_sense_dmat != NULL)
725 bus_dma_tag_destroy(sc->mfi_sense_dmat);
726
727 if (sc->mfi_frames_busaddr != 0)
728 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
729 if (sc->mfi_frames != NULL)
730 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
731 sc->mfi_frames_dmamap);
732 if (sc->mfi_frames_dmat != NULL)
733 bus_dma_tag_destroy(sc->mfi_frames_dmat);
734
735 if (sc->mfi_comms_busaddr != 0)
736 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
737 if (sc->mfi_comms != NULL)
738 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
739 sc->mfi_comms_dmamap);
740 if (sc->mfi_comms_dmat != NULL)
741 bus_dma_tag_destroy(sc->mfi_comms_dmat);
742
743 if (sc->mfi_buffer_dmat != NULL)
744 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
745 if (sc->mfi_parent_dmat != NULL)
746 bus_dma_tag_destroy(sc->mfi_parent_dmat);
747
748 if (mtx_initialized(&sc->mfi_io_lock))
749 mtx_destroy(&sc->mfi_io_lock);
750
751 return;
752}
753
754static void
755mfi_startup(void *arg)
756{
757 struct mfi_softc *sc;
758
759 sc = (struct mfi_softc *)arg;
760
761 config_intrhook_disestablish(&sc->mfi_ich);
762
763 mfi_enable_intr(sc);
764 mfi_ldprobe(sc);
765}
766
767static void
768mfi_intr(void *arg)
769{
770 struct mfi_softc *sc;
771 struct mfi_command *cm;
772 uint32_t status, pi, ci, context;
773
774 sc = (struct mfi_softc *)arg;
775
776 status = MFI_READ4(sc, MFI_OSTS);
777 if ((status & MFI_OSTS_INTR_VALID) == 0)
778 return;
779 MFI_WRITE4(sc, MFI_OSTS, status);
780
781 pi = sc->mfi_comms->hw_pi;
782 ci = sc->mfi_comms->hw_ci;
783 mtx_lock(&sc->mfi_io_lock);
784 while (ci != pi) {
785 context = sc->mfi_comms->hw_reply_q[ci];
786 cm = &sc->mfi_commands[context];
787 mfi_remove_busy(cm);
788 mfi_complete(sc, cm);
789 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
790 ci = 0;
791 }
792 }
793 mtx_unlock(&sc->mfi_io_lock);
794
795 sc->mfi_comms->hw_ci = ci;
796
797 return;
798}
799
800int
801mfi_shutdown(struct mfi_softc *sc)
802{
803 struct mfi_dcmd_frame *dcmd;
804 struct mfi_command *cm;
805 int error;
806
807 mtx_lock(&sc->mfi_io_lock);
808 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
809 mtx_unlock(&sc->mfi_io_lock);
810 if (error)
811 return (error);
812
813 if (sc->mfi_aen_cm != NULL)
814 mfi_abort(sc, sc->mfi_aen_cm);
815
816 dcmd = &cm->cm_frame->dcmd;
817 dcmd->header.flags = MFI_FRAME_DIR_NONE;
818
819 if ((error = mfi_polled_command(sc, cm)) != 0) {
820 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
821 }
822
823 mfi_release_command(cm);
824 return (error);
825}
826
827static void
828mfi_enable_intr(struct mfi_softc *sc)
829{
830
831 MFI_WRITE4(sc, MFI_OMSK, 0x01);
832}
833
834static void
835mfi_ldprobe(struct mfi_softc *sc)
836{
837 struct mfi_frame_header *hdr;
838 struct mfi_command *cm = NULL;
839 struct mfi_ld_list *list = NULL;
840 int error, i;
841
842 mtx_lock(&sc->mfi_io_lock);
843 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
844 (void **)&list, sizeof(*list));
845 if (error)
846 goto out;
847
848 cm->cm_flags = MFI_CMD_DATAIN;
849 if (mfi_wait_command(sc, cm) != 0) {
850 device_printf(sc->mfi_dev, "Failed to get device listing\n");
851 goto out;
852 }
853
854 hdr = &cm->cm_frame->header;
855 if (hdr->cmd_status != MFI_STAT_OK) {
856 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
857 hdr->cmd_status);
858 goto out;
859 }
860
861 for (i = 0; i < list->ld_count; i++)
862 mfi_add_ld(sc, list->ld_list[i].ld.target_id);
863out:
864 if (list)
865 free(list, M_MFIBUF);
866 if (cm)
867 mfi_release_command(cm);
868 mtx_unlock(&sc->mfi_io_lock);
869 return;
870}
871
872static void
873mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
874{
875 switch (detail->arg_type) {
876 case MR_EVT_ARGS_NONE:
877 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
878 detail->seq,
879 detail->time,
880 detail->class.members.locale,
881 detail->class.members.class,
882 detail->description
883 );
884 break;
885 case MR_EVT_ARGS_CDB_SENSE:
886 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D"
887 "Sense %*D\n: %s\n",
888 detail->seq,
889 detail->time,
890 detail->class.members.locale,
891 detail->class.members.class,
892 detail->args.cdb_sense.pd.device_id,
893 detail->args.cdb_sense.pd.enclosure_index,
894 detail->args.cdb_sense.pd.slot_number,
895 detail->args.cdb_sense.cdb_len,
896 detail->args.cdb_sense.cdb,
897 ":",
898 detail->args.cdb_sense.sense_len,
899 detail->args.cdb_sense.sense,
900 ":",
901 detail->description
902 );
903 break;
904 case MR_EVT_ARGS_LD:
905 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
906 "event: %s\n",
907 detail->seq,
908 detail->time,
909 detail->class.members.locale,
910 detail->class.members.class,
911 detail->args.ld.ld_index,
912 detail->args.ld.target_id,
913 detail->description
914 );
915 break;
916 case MR_EVT_ARGS_LD_COUNT:
917 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
918 "count %lld: %s\n",
919 detail->seq,
920 detail->time,
921 detail->class.members.locale,
922 detail->class.members.class,
923 detail->args.ld_count.ld.ld_index,
924 detail->args.ld_count.ld.target_id,
925 (long long)detail->args.ld_count.count,
926 detail->description
927 );
928 break;
929 case MR_EVT_ARGS_LD_LBA:
930 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
931 "lba %lld: %s\n",
932 detail->seq,
933 detail->time,
934 detail->class.members.locale,
935 detail->class.members.class,
936 detail->args.ld_lba.ld.ld_index,
937 detail->args.ld_lba.ld.target_id,
938 (long long)detail->args.ld_lba.lba,
939 detail->description
940 );
941 break;
942 case MR_EVT_ARGS_LD_OWNER:
943 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
944 "owner changed: prior %d, new %d: %s\n",
945 detail->seq,
946 detail->time,
947 detail->class.members.locale,
948 detail->class.members.class,
949 detail->args.ld_owner.ld.ld_index,
950 detail->args.ld_owner.ld.target_id,
951 detail->args.ld_owner.pre_owner,
952 detail->args.ld_owner.new_owner,
953 detail->description
954 );
955 break;
956 case MR_EVT_ARGS_LD_LBA_PD_LBA:
957 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
958 "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
959 detail->seq,
960 detail->time,
961 detail->class.members.locale,
962 detail->class.members.class,
963 detail->args.ld_lba_pd_lba.ld.ld_index,
964 detail->args.ld_lba_pd_lba.ld.target_id,
965 (long long)detail->args.ld_lba_pd_lba.ld_lba,
966 detail->args.ld_lba_pd_lba.pd.device_id,
967 detail->args.ld_lba_pd_lba.pd.enclosure_index,
968 detail->args.ld_lba_pd_lba.pd.slot_number,
969 (long long)detail->args.ld_lba_pd_lba.pd_lba,
970 detail->description
971 );
972 break;
973 case MR_EVT_ARGS_LD_PROG:
974 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
975 "progress %d%% in %ds: %s\n",
976 detail->seq,
977 detail->time,
978 detail->class.members.locale,
979 detail->class.members.class,
980 detail->args.ld_prog.ld.ld_index,
981 detail->args.ld_prog.ld.target_id,
982 detail->args.ld_prog.prog.progress/655,
983 detail->args.ld_prog.prog.elapsed_seconds,
984 detail->description
985 );
986 break;
987 case MR_EVT_ARGS_LD_STATE:
988 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
989 "state prior %d new %d: %s\n",
990 detail->seq,
991 detail->time,
992 detail->class.members.locale,
993 detail->class.members.class,
994 detail->args.ld_state.ld.ld_index,
995 detail->args.ld_state.ld.target_id,
996 detail->args.ld_state.prev_state,
997 detail->args.ld_state.new_state,
998 detail->description
999 );
1000 break;
1001 case MR_EVT_ARGS_LD_STRIP:
1002 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1003 "strip %lld: %s\n",
1004 detail->seq,
1005 detail->time,
1006 detail->class.members.locale,
1007 detail->class.members.class,
1008 detail->args.ld_strip.ld.ld_index,
1009 detail->args.ld_strip.ld.target_id,
1010 (long long)detail->args.ld_strip.strip,
1011 detail->description
1012 );
1013 break;
1014 case MR_EVT_ARGS_PD:
1015 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1016 "event: %s\n",
1017 detail->seq,
1018 detail->time,
1019 detail->class.members.locale,
1020 detail->class.members.class,
1021 detail->args.pd.device_id,
1022 detail->args.pd.enclosure_index,
1023 detail->args.pd.slot_number,
1024 detail->description
1025 );
1026 break;
1027 case MR_EVT_ARGS_PD_ERR:
1028 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1029 "err %d: %s\n",
1030 detail->seq,
1031 detail->time,
1032 detail->class.members.locale,
1033 detail->class.members.class,
1034 detail->args.pd_err.pd.device_id,
1035 detail->args.pd_err.pd.enclosure_index,
1036 detail->args.pd_err.pd.slot_number,
1037 detail->args.pd_err.err,
1038 detail->description
1039 );
1040 break;
1041 case MR_EVT_ARGS_PD_LBA:
1042 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1043 "lba %lld: %s\n",
1044 detail->seq,
1045 detail->time,
1046 detail->class.members.locale,
1047 detail->class.members.class,
1048 detail->args.pd_lba.pd.device_id,
1049 detail->args.pd_lba.pd.enclosure_index,
1050 detail->args.pd_lba.pd.slot_number,
1051 (long long)detail->args.pd_lba.lba,
1052 detail->description
1053 );
1054 break;
1055 case MR_EVT_ARGS_PD_LBA_LD:
1056 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1057 "lba %lld VD %02d/%d: %s\n",
1058 detail->seq,
1059 detail->time,
1060 detail->class.members.locale,
1061 detail->class.members.class,
1062 detail->args.pd_lba_ld.pd.device_id,
1063 detail->args.pd_lba_ld.pd.enclosure_index,
1064 detail->args.pd_lba_ld.pd.slot_number,
1065 (long long)detail->args.pd_lba.lba,
1066 detail->args.pd_lba_ld.ld.ld_index,
1067 detail->args.pd_lba_ld.ld.target_id,
1068 detail->description
1069 );
1070 break;
1071 case MR_EVT_ARGS_PD_PROG:
1072 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1073 "progress %d%% seconds %ds: %s\n",
1074 detail->seq,
1075 detail->time,
1076 detail->class.members.locale,
1077 detail->class.members.class,
1078 detail->args.pd_prog.pd.device_id,
1079 detail->args.pd_prog.pd.enclosure_index,
1080 detail->args.pd_prog.pd.slot_number,
1081 detail->args.pd_prog.prog.progress/655,
1082 detail->args.pd_prog.prog.elapsed_seconds,
1083 detail->description
1084 );
1085 break;
1086 case MR_EVT_ARGS_PD_STATE:
1087 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1088 "state prior %d new %d: %s\n",
1089 detail->seq,
1090 detail->time,
1091 detail->class.members.locale,
1092 detail->class.members.class,
1093 detail->args.pd_prog.pd.device_id,
1094 detail->args.pd_prog.pd.enclosure_index,
1095 detail->args.pd_prog.pd.slot_number,
1096 detail->args.pd_state.prev_state,
1097 detail->args.pd_state.new_state,
1098 detail->description
1099 );
1100 break;
1101 case MR_EVT_ARGS_PCI:
1102 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x04%x 0x04%x "
1103 "0x04%x 0x04%x: %s\n",
1104 detail->seq,
1105 detail->time,
1106 detail->class.members.locale,
1107 detail->class.members.class,
1108 detail->args.pci.venderId,
1109 detail->args.pci.deviceId,
1110 detail->args.pci.subVenderId,
1111 detail->args.pci.subDeviceId,
1112 detail->description
1113 );
1114 break;
1115 case MR_EVT_ARGS_RATE:
1116 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1117 detail->seq,
1118 detail->time,
1119 detail->class.members.locale,
1120 detail->class.members.class,
1121 detail->args.rate,
1122 detail->description
1123 );
1124 break;
1125 case MR_EVT_ARGS_TIME:
1126 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1127 "elapsed %ds: %s\n",
1128 detail->seq,
1129 detail->time,
1130 detail->class.members.locale,
1131 detail->class.members.class,
1132 detail->args.time.rtc,
1133 detail->args.time.elapsedSeconds,
1134 detail->description
1135 );
1136 break;
1137 case MR_EVT_ARGS_ECC:
1138 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1139 detail->seq,
1140 detail->time,
1141 detail->class.members.locale,
1142 detail->class.members.class,
1143 detail->args.ecc.ecar,
1144 detail->args.ecc.elog,
1145 detail->args.ecc.str,
1146 detail->description
1147 );
1148 break;
1149 default:
1150 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1151 detail->seq,
1152 detail->time,
1153 detail->class.members.locale,
1154 detail->class.members.class,
1155 detail->arg_type, detail->description
1156 );
1157 }
1158}
1159
1160static int
1161mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1162{
1163 struct mfi_command *cm;
1164 struct mfi_dcmd_frame *dcmd;
1165 union mfi_evt current_aen, prior_aen;
1166 struct mfi_evt_detail *ed = NULL;
1167 int error;
1168
1169 current_aen.word = locale;
1170 if (sc->mfi_aen_cm != NULL) {
1171 prior_aen.word =
1172 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1173 if (prior_aen.members.class <= current_aen.members.class &&
1174 !((prior_aen.members.locale & current_aen.members.locale)
1175 ^current_aen.members.locale)) {
1176 return (0);
1177 } else {
1178 prior_aen.members.locale |= current_aen.members.locale;
1179 if (prior_aen.members.class
1180 < current_aen.members.class)
1181 current_aen.members.class =
1182 prior_aen.members.class;
1183 mfi_abort(sc, sc->mfi_aen_cm);
1184 }
1185 }
1186
1187 mtx_lock(&sc->mfi_io_lock);
1188 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1189 (void **)&ed, sizeof(*ed));
1190 mtx_unlock(&sc->mfi_io_lock);
1191 if (error)
1192 return (error);
1193
1194 dcmd = &cm->cm_frame->dcmd;
1195 ((uint32_t *)&dcmd->mbox)[0] = seq;
1196 ((uint32_t *)&dcmd->mbox)[1] = locale;
1197 cm->cm_flags = MFI_CMD_DATAIN;
1198 cm->cm_complete = mfi_aen_complete;
1199
1200 sc->mfi_aen_cm = cm;
1201
1202 mfi_enqueue_ready(cm);
1203 mfi_startio(sc);
1204
1205 return (0);
1206}
1207
1208static void
1209mfi_aen_complete(struct mfi_command *cm)
1210{
1211 struct mfi_frame_header *hdr;
1212 struct mfi_softc *sc;
1213 struct mfi_evt_detail *detail;
1214 struct mfi_aen *mfi_aen_entry;
1215 int seq = 0, aborted = 0;
1216
1217 sc = cm->cm_sc;
1218 hdr = &cm->cm_frame->header;
1219
1220 if (sc->mfi_aen_cm == NULL)
1221 return;
1222
1223 if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1224 sc->mfi_aen_cm->cm_aen_abort = 0;
1225 aborted = 1;
1226 } else {
1227 sc->mfi_aen_triggered = 1;
1228 if (sc->mfi_poll_waiting)
1229 selwakeup(&sc->mfi_select);
1230 detail = cm->cm_data;
1231 mtx_unlock(&sc->mfi_io_lock);
1232 mfi_decode_evt(sc, detail);
1233 mtx_lock(&sc->mfi_io_lock);
1234 seq = detail->seq + 1;
1235 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1236 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1237 aen_link);
1238 psignal(mfi_aen_entry->p, SIGIO);
1239 free(mfi_aen_entry, M_MFIBUF);
1240 }
1241 }
1242
1243 free(cm->cm_data, M_MFIBUF);
1244 sc->mfi_aen_cm = NULL;
1245 wakeup(&sc->mfi_aen_cm);
1246 mfi_release_command(cm);
1247
1248 /* set it up again so the driver can catch more events */
1249 if (!aborted) {
1250 mtx_unlock(&sc->mfi_io_lock);
1251 mfi_aen_setup(sc, seq);
1252 mtx_lock(&sc->mfi_io_lock);
1253 }
1254}
1255
1256/* Only do one event for now so we can easily iterate through them */
1257#define MAX_EVENTS 1
1258static int
1259mfi_get_entry(struct mfi_softc *sc, int seq)
1260{
1261 struct mfi_command *cm;
1262 struct mfi_dcmd_frame *dcmd;
1263 struct mfi_evt_list *el;
1264 int error;
1265 int i;
1266 int size;
1267
1268 mtx_lock(&sc->mfi_io_lock);
1269 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1270 mtx_unlock(&sc->mfi_io_lock);
1271 return (EBUSY);
1272 }
1273 mtx_unlock(&sc->mfi_io_lock);
1274
1275 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1276 * (MAX_EVENTS - 1);
1277 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1278 if (el == NULL) {
1279 mtx_lock(&sc->mfi_io_lock);
1280 mfi_release_command(cm);
1281 mtx_unlock(&sc->mfi_io_lock);
1282 return (ENOMEM);
1283 }
1284
1285 dcmd = &cm->cm_frame->dcmd;
1286 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1287 dcmd->header.cmd = MFI_CMD_DCMD;
1288 dcmd->header.timeout = 0;
1289 dcmd->header.data_len = size;
1290 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1291 ((uint32_t *)&dcmd->mbox)[0] = seq;
1292 ((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1293 cm->cm_sg = &dcmd->sgl;
1294 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1295 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1296 cm->cm_data = el;
1297 cm->cm_len = size;
1298
1299 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1300 device_printf(sc->mfi_dev, "Event list buffer map failed\n");
1301 free(el, M_MFIBUF);
1302 mfi_release_command(cm);
1303 return (error);
1304 }
1305
1306 if ((error = mfi_polled_command(sc, cm)) != 0) {
1307 device_printf(sc->mfi_dev, "Failed to get event log entry\n");
562 MFI_SECTOR_LEN;
563 error = 0;
564 goto out;
565 }
566
567 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
568 BUS_DMASYNC_POSTREAD);
569 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
570
571 max_sectors_1 = (1 << ci->stripe_sz_ops.min) * ci->max_strips_per_io;
572 max_sectors_2 = ci->max_request_size;
573 sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
574
575out:
576 if (ci)
577 free(ci, M_MFIBUF);
578 if (cm)
579 mfi_release_command(cm);
580 mtx_unlock(&sc->mfi_io_lock);
581 return (error);
582}
583
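/*
 * Fetch the firmware's event log state (newest and shutdown sequence
 * numbers, etc.) with a polled MFI_DCMD_CTRL_EVENT_GETINFO command.
 */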
584static int
585mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
586{
587 struct mfi_command *cm = NULL;
588 int error;
589
590 mtx_lock(&sc->mfi_io_lock);
591 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
592 (void **)log_state, sizeof(**log_state));
593 if (error)
594 goto out;
595 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
596
597 if ((error = mfi_mapcmd(sc, cm)) != 0) {
598 device_printf(sc->mfi_dev, "Log state buffer map failed\n");
599 goto out;
600 }
601
602 if ((error = mfi_polled_command(sc, cm)) != 0) {
603 device_printf(sc->mfi_dev, "Failed to get log state\n");
604 goto out;
605 }
606
607 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
608 BUS_DMASYNC_POSTREAD);
609 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
610
611out:
612 if (cm)
613 mfi_release_command(cm);
614 mtx_unlock(&sc->mfi_io_lock);
615
616 return (error);
617}
618
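/*
 * Set up asynchronous event notification.  When no starting sequence
 * number is supplied, the event log state is read first and any entries
 * logged since the last shutdown are fetched and printed, after which an
 * AEN is registered for new events at the configured class and locale.
 */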
619static int
620mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
621{
622 struct mfi_evt_log_state *log_state = NULL;
623 union mfi_evt class_locale;
624 int error = 0;
625 uint32_t seq;
626
627 class_locale.members.reserved = 0;
628 class_locale.members.locale = mfi_event_locale;
629 class_locale.members.class = mfi_event_class;
630
631 if (seq_start == 0) {
632 error = mfi_get_log_state(sc, &log_state);
633 if (error) {
634 if (log_state)
635 free(log_state, M_MFIBUF);
636 return (error);
637 }
638 /*
639 * Don't run them yet since we can't parse them.
640 * We can indirectly get the contents from
641 * the AEN mechanism by setting the sequence number lower than
642 * the current one; the firmware will iterate through them.
643 */
644 for (seq = log_state->shutdown_seq_num;
645 seq <= log_state->newest_seq_num; seq++) {
646 mfi_get_entry(sc, seq);
647 }
648 } else
649 seq = seq_start;
650 mfi_aen_register(sc, seq, class_locale.word);
651 free(log_state, M_MFIBUF);
652
653 return 0;
654}
655
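/*
 * Send a frame and busy-wait for its completion.  The frame is flagged so
 * that the firmware does not post it to the reply queue; completion is
 * detected by watching cmd_status change from 0xff, with a timeout of
 * MFI_POLL_TIMEOUT_SECS.
 */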
656static int
657mfi_polled_command(struct mfi_softc *sc, struct mfi_command *cm)
658{
659 struct mfi_frame_header *hdr;
660 int tm = MFI_POLL_TIMEOUT_SECS * 1000000;
661
662 hdr = &cm->cm_frame->header;
663 hdr->cmd_status = 0xff;
664 hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
665
666 mfi_send_frame(sc, cm);
667
668 while (hdr->cmd_status == 0xff) {
669 DELAY(1000);
670 tm -= 1000;
671 if (tm <= 0)
672 break;
673 }
674
675 if (hdr->cmd_status == 0xff) {
676 device_printf(sc->mfi_dev, "Frame %p timed out\n", hdr);
677 return (ETIMEDOUT);
678 }
679
680 return (0);
681}
682
683static int
684mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
685{
686
687 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
688 cm->cm_complete = NULL;
689
690 mfi_enqueue_ready(cm);
691 mfi_startio(sc);
692 return (msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0));
693}
694
695void
696mfi_free(struct mfi_softc *sc)
697{
698 struct mfi_command *cm;
699 int i;
700
701 if (sc->mfi_cdev != NULL)
702 destroy_dev(sc->mfi_cdev);
703
704 if (sc->mfi_total_cmds != 0) {
705 for (i = 0; i < sc->mfi_total_cmds; i++) {
706 cm = &sc->mfi_commands[i];
707 bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
708 }
709 free(sc->mfi_commands, M_MFIBUF);
710 }
711
712 if (sc->mfi_intr)
713 bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
714 if (sc->mfi_irq != NULL)
715 bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
716 sc->mfi_irq);
717
718 if (sc->mfi_sense_busaddr != 0)
719 bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
720 if (sc->mfi_sense != NULL)
721 bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
722 sc->mfi_sense_dmamap);
723 if (sc->mfi_sense_dmat != NULL)
724 bus_dma_tag_destroy(sc->mfi_sense_dmat);
725
726 if (sc->mfi_frames_busaddr != 0)
727 bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
728 if (sc->mfi_frames != NULL)
729 bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
730 sc->mfi_frames_dmamap);
731 if (sc->mfi_frames_dmat != NULL)
732 bus_dma_tag_destroy(sc->mfi_frames_dmat);
733
734 if (sc->mfi_comms_busaddr != 0)
735 bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
736 if (sc->mfi_comms != NULL)
737 bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
738 sc->mfi_comms_dmamap);
739 if (sc->mfi_comms_dmat != NULL)
740 bus_dma_tag_destroy(sc->mfi_comms_dmat);
741
742 if (sc->mfi_buffer_dmat != NULL)
743 bus_dma_tag_destroy(sc->mfi_buffer_dmat);
744 if (sc->mfi_parent_dmat != NULL)
745 bus_dma_tag_destroy(sc->mfi_parent_dmat);
746
747 if (mtx_initialized(&sc->mfi_io_lock))
748 mtx_destroy(&sc->mfi_io_lock);
749
750 return;
751}
752
753static void
754mfi_startup(void *arg)
755{
756 struct mfi_softc *sc;
757
758 sc = (struct mfi_softc *)arg;
759
760 config_intrhook_disestablish(&sc->mfi_ich);
761
762 mfi_enable_intr(sc);
763 mfi_ldprobe(sc);
764}
765
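/*
 * Interrupt handler.  Acknowledge the controller, then walk the reply
 * queue from the consumer index to the producer index, completing each
 * command found there, and finally write the updated consumer index back
 * to the shared comms area.
 */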
766static void
767mfi_intr(void *arg)
768{
769 struct mfi_softc *sc;
770 struct mfi_command *cm;
771 uint32_t status, pi, ci, context;
772
773 sc = (struct mfi_softc *)arg;
774
775 status = MFI_READ4(sc, MFI_OSTS);
776 if ((status & MFI_OSTS_INTR_VALID) == 0)
777 return;
778 MFI_WRITE4(sc, MFI_OSTS, status);
779
780 pi = sc->mfi_comms->hw_pi;
781 ci = sc->mfi_comms->hw_ci;
782 mtx_lock(&sc->mfi_io_lock);
783 while (ci != pi) {
784 context = sc->mfi_comms->hw_reply_q[ci];
785 cm = &sc->mfi_commands[context];
786 mfi_remove_busy(cm);
787 mfi_complete(sc, cm);
788 if (++ci == (sc->mfi_max_fw_cmds + 1)) {
789 ci = 0;
790 }
791 }
792 mtx_unlock(&sc->mfi_io_lock);
793
794 sc->mfi_comms->hw_ci = ci;
795
796 return;
797}
798
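/*
 * Tell the firmware to shut down cleanly: abort any outstanding AEN
 * command, then issue a polled MFI_DCMD_CTRL_SHUTDOWN.
 */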
799int
800mfi_shutdown(struct mfi_softc *sc)
801{
802 struct mfi_dcmd_frame *dcmd;
803 struct mfi_command *cm;
804 int error;
805
806 mtx_lock(&sc->mfi_io_lock);
807 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
808 mtx_unlock(&sc->mfi_io_lock);
809 if (error)
810 return (error);
811
812 if (sc->mfi_aen_cm != NULL)
813 mfi_abort(sc, sc->mfi_aen_cm);
814
815 dcmd = &cm->cm_frame->dcmd;
816 dcmd->header.flags = MFI_FRAME_DIR_NONE;
817
818 if ((error = mfi_polled_command(sc, cm)) != 0) {
819 device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
820 }
821
822 mfi_release_command(cm);
823 return (error);
824}
825
826static void
827mfi_enable_intr(struct mfi_softc *sc)
828{
829
830 MFI_WRITE4(sc, MFI_OMSK, 0x01);
831}
832
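/*
 * Ask the firmware for its list of logical drives (MFI_DCMD_LD_GET_LIST)
 * and attach a disk child for each one via mfi_add_ld().
 */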
833static void
834mfi_ldprobe(struct mfi_softc *sc)
835{
836 struct mfi_frame_header *hdr;
837 struct mfi_command *cm = NULL;
838 struct mfi_ld_list *list = NULL;
839 int error, i;
840
841 mtx_lock(&sc->mfi_io_lock);
842 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
843 (void **)&list, sizeof(*list));
844 if (error)
845 goto out;
846
847 cm->cm_flags = MFI_CMD_DATAIN;
848 if (mfi_wait_command(sc, cm) != 0) {
849 device_printf(sc->mfi_dev, "Failed to get device listing\n");
850 goto out;
851 }
852
853 hdr = &cm->cm_frame->header;
854 if (hdr->cmd_status != MFI_STAT_OK) {
855 device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
856 hdr->cmd_status);
857 goto out;
858 }
859
860 for (i = 0; i < list->ld_count; i++)
861 mfi_add_ld(sc, list->ld_list[i].ld.target_id);
862out:
863 if (list)
864 free(list, M_MFIBUF);
865 if (cm)
866 mfi_release_command(cm);
867 mtx_unlock(&sc->mfi_io_lock);
868 return;
869}
870
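/*
 * Pretty-print an event detail.  Each argument type carries a different
 * payload, so the switch below formats a one-line message per type.
 */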
871static void
872mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
873{
874 switch (detail->arg_type) {
875 case MR_EVT_ARGS_NONE:
876 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - %s\n",
877 detail->seq,
878 detail->time,
879 detail->class.members.locale,
880 detail->class.members.class,
881 detail->description
882 );
883 break;
884 case MR_EVT_ARGS_CDB_SENSE:
885 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) CDB %*D"
886 "Sense %*D\n: %s\n",
887 detail->seq,
888 detail->time,
889 detail->class.members.locale,
890 detail->class.members.class,
891 detail->args.cdb_sense.pd.device_id,
892 detail->args.cdb_sense.pd.enclosure_index,
893 detail->args.cdb_sense.pd.slot_number,
894 detail->args.cdb_sense.cdb_len,
895 detail->args.cdb_sense.cdb,
896 ":",
897 detail->args.cdb_sense.sense_len,
898 detail->args.cdb_sense.sense,
899 ":",
900 detail->description
901 );
902 break;
903 case MR_EVT_ARGS_LD:
904 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
905 "event: %s\n",
906 detail->seq,
907 detail->time,
908 detail->class.members.locale,
909 detail->class.members.class,
910 detail->args.ld.ld_index,
911 detail->args.ld.target_id,
912 detail->description
913 );
914 break;
915 case MR_EVT_ARGS_LD_COUNT:
916 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
917 "count %lld: %s\n",
918 detail->seq,
919 detail->time,
920 detail->class.members.locale,
921 detail->class.members.class,
922 detail->args.ld_count.ld.ld_index,
923 detail->args.ld_count.ld.target_id,
924 (long long)detail->args.ld_count.count,
925 detail->description
926 );
927 break;
928 case MR_EVT_ARGS_LD_LBA:
929 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
930 "lba %lld: %s\n",
931 detail->seq,
932 detail->time,
933 detail->class.members.locale,
934 detail->class.members.class,
935 detail->args.ld_lba.ld.ld_index,
936 detail->args.ld_lba.ld.target_id,
937 (long long)detail->args.ld_lba.lba,
938 detail->description
939 );
940 break;
941 case MR_EVT_ARGS_LD_OWNER:
942 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
943 "owner changed: prior %d, new %d: %s\n",
944 detail->seq,
945 detail->time,
946 detail->class.members.locale,
947 detail->class.members.class,
948 detail->args.ld_owner.ld.ld_index,
949 detail->args.ld_owner.ld.target_id,
950 detail->args.ld_owner.pre_owner,
951 detail->args.ld_owner.new_owner,
952 detail->description
953 );
954 break;
955 case MR_EVT_ARGS_LD_LBA_PD_LBA:
956 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
957 "lba %lld, physical drive PD %02d(e%d/s%d) lba %lld: %s\n",
958 detail->seq,
959 detail->time,
960 detail->class.members.locale,
961 detail->class.members.class,
962 detail->args.ld_lba_pd_lba.ld.ld_index,
963 detail->args.ld_lba_pd_lba.ld.target_id,
964 (long long)detail->args.ld_lba_pd_lba.ld_lba,
965 detail->args.ld_lba_pd_lba.pd.device_id,
966 detail->args.ld_lba_pd_lba.pd.enclosure_index,
967 detail->args.ld_lba_pd_lba.pd.slot_number,
968 (long long)detail->args.ld_lba_pd_lba.pd_lba,
969 detail->description
970 );
971 break;
972 case MR_EVT_ARGS_LD_PROG:
973 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
974 "progress %d%% in %ds: %s\n",
975 detail->seq,
976 detail->time,
977 detail->class.members.locale,
978 detail->class.members.class,
979 detail->args.ld_prog.ld.ld_index,
980 detail->args.ld_prog.ld.target_id,
981 detail->args.ld_prog.prog.progress/655,
982 detail->args.ld_prog.prog.elapsed_seconds,
983 detail->description
984 );
985 break;
986 case MR_EVT_ARGS_LD_STATE:
987 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
988 "state prior %d new %d: %s\n",
989 detail->seq,
990 detail->time,
991 detail->class.members.locale,
992 detail->class.members.class,
993 detail->args.ld_state.ld.ld_index,
994 detail->args.ld_state.ld.target_id,
995 detail->args.ld_state.prev_state,
996 detail->args.ld_state.new_state,
997 detail->description
998 );
999 break;
1000 case MR_EVT_ARGS_LD_STRIP:
1001 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - VD %02d/%d "
1002 "strip %lld: %s\n",
1003 detail->seq,
1004 detail->time,
1005 detail->class.members.locale,
1006 detail->class.members.class,
1007 detail->args.ld_strip.ld.ld_index,
1008 detail->args.ld_strip.ld.target_id,
1009 (long long)detail->args.ld_strip.strip,
1010 detail->description
1011 );
1012 break;
1013 case MR_EVT_ARGS_PD:
1014 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1015 "event: %s\n",
1016 detail->seq,
1017 detail->time,
1018 detail->class.members.locale,
1019 detail->class.members.class,
1020 detail->args.pd.device_id,
1021 detail->args.pd.enclosure_index,
1022 detail->args.pd.slot_number,
1023 detail->description
1024 );
1025 break;
1026 case MR_EVT_ARGS_PD_ERR:
1027 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1028 "err %d: %s\n",
1029 detail->seq,
1030 detail->time,
1031 detail->class.members.locale,
1032 detail->class.members.class,
1033 detail->args.pd_err.pd.device_id,
1034 detail->args.pd_err.pd.enclosure_index,
1035 detail->args.pd_err.pd.slot_number,
1036 detail->args.pd_err.err,
1037 detail->description
1038 );
1039 break;
1040 case MR_EVT_ARGS_PD_LBA:
1041 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1042 "lba %lld: %s\n",
1043 detail->seq,
1044 detail->time,
1045 detail->class.members.locale,
1046 detail->class.members.class,
1047 detail->args.pd_lba.pd.device_id,
1048 detail->args.pd_lba.pd.enclosure_index,
1049 detail->args.pd_lba.pd.slot_number,
1050 (long long)detail->args.pd_lba.lba,
1051 detail->description
1052 );
1053 break;
1054 case MR_EVT_ARGS_PD_LBA_LD:
1055 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1056 "lba %lld VD %02d/%d: %s\n",
1057 detail->seq,
1058 detail->time,
1059 detail->class.members.locale,
1060 detail->class.members.class,
1061 detail->args.pd_lba_ld.pd.device_id,
1062 detail->args.pd_lba_ld.pd.enclosure_index,
1063 detail->args.pd_lba_ld.pd.slot_number,
1064 (long long)detail->args.pd_lba.lba,
1065 detail->args.pd_lba_ld.ld.ld_index,
1066 detail->args.pd_lba_ld.ld.target_id,
1067 detail->description
1068 );
1069 break;
1070 case MR_EVT_ARGS_PD_PROG:
1071 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1072 "progress %d%% seconds %ds: %s\n",
1073 detail->seq,
1074 detail->time,
1075 detail->class.members.locale,
1076 detail->class.members.class,
1077 detail->args.pd_prog.pd.device_id,
1078 detail->args.pd_prog.pd.enclosure_index,
1079 detail->args.pd_prog.pd.slot_number,
1080 detail->args.pd_prog.prog.progress/655,
1081 detail->args.pd_prog.prog.elapsed_seconds,
1082 detail->description
1083 );
1084 break;
1085 case MR_EVT_ARGS_PD_STATE:
1086 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PD %02d(e%d/s%d) "
1087 "state prior %d new %d: %s\n",
1088 detail->seq,
1089 detail->time,
1090 detail->class.members.locale,
1091 detail->class.members.class,
1092 detail->args.pd_prog.pd.device_id,
1093 detail->args.pd_prog.pd.enclosure_index,
1094 detail->args.pd_prog.pd.slot_number,
1095 detail->args.pd_state.prev_state,
1096 detail->args.pd_state.new_state,
1097 detail->description
1098 );
1099 break;
1100 case MR_EVT_ARGS_PCI:
1101 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - PCI 0x%04x 0x%04x "
1102 "0x%04x 0x%04x: %s\n",
1103 detail->seq,
1104 detail->time,
1105 detail->class.members.locale,
1106 detail->class.members.class,
1107 detail->args.pci.venderId,
1108 detail->args.pci.deviceId,
1109 detail->args.pci.subVenderId,
1110 detail->args.pci.subDeviceId,
1111 detail->description
1112 );
1113 break;
1114 case MR_EVT_ARGS_RATE:
1115 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Rebuild rate %d: %s\n",
1116 detail->seq,
1117 detail->time,
1118 detail->class.members.locale,
1119 detail->class.members.class,
1120 detail->args.rate,
1121 detail->description
1122 );
1123 break;
1124 case MR_EVT_ARGS_TIME:
1125 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ticks %d "
1126 "elapsed %ds: %s\n",
1127 detail->seq,
1128 detail->time,
1129 detail->class.members.locale,
1130 detail->class.members.class,
1131 detail->args.time.rtc,
1132 detail->args.time.elapsedSeconds,
1133 detail->description
1134 );
1135 break;
1136 case MR_EVT_ARGS_ECC:
1137 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Adapter ECC %x,%x: %s: %s\n",
1138 detail->seq,
1139 detail->time,
1140 detail->class.members.locale,
1141 detail->class.members.class,
1142 detail->args.ecc.ecar,
1143 detail->args.ecc.elog,
1144 detail->args.ecc.str,
1145 detail->description
1146 );
1147 break;
1148 default:
1149 device_printf(sc->mfi_dev, "%d (%us/0x%04x/%d) - Type %d: %s\n",
1150 detail->seq,
1151 detail->time,
1152 detail->class.members.locale,
1153 detail->class.members.class,
1154 detail->arg_type, detail->description
1155 );
1156 }
1157}
1158
1159static int
1160mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1161{
1162 struct mfi_command *cm;
1163 struct mfi_dcmd_frame *dcmd;
1164 union mfi_evt current_aen, prior_aen;
1165 struct mfi_evt_detail *ed = NULL;
1166 int error;
1167
1168 current_aen.word = locale;
1169 if (sc->mfi_aen_cm != NULL) {
1170 prior_aen.word =
1171 ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1172 if (prior_aen.members.class <= current_aen.members.class &&
1173 !((prior_aen.members.locale & current_aen.members.locale)
1174 ^current_aen.members.locale)) {
1175 return (0);
1176 } else {
1177 prior_aen.members.locale |= current_aen.members.locale;
1178 if (prior_aen.members.class
1179 < current_aen.members.class)
1180 current_aen.members.class =
1181 prior_aen.members.class;
1182 mfi_abort(sc, sc->mfi_aen_cm);
1183 }
1184 }
1185
1186 mtx_lock(&sc->mfi_io_lock);
1187 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1188 (void **)&ed, sizeof(*ed));
1189 mtx_unlock(&sc->mfi_io_lock);
1190 if (error)
1191 return (error);
1192
1193 dcmd = &cm->cm_frame->dcmd;
1194 ((uint32_t *)&dcmd->mbox)[0] = seq;
1195 ((uint32_t *)&dcmd->mbox)[1] = locale;
1196 cm->cm_flags = MFI_CMD_DATAIN;
1197 cm->cm_complete = mfi_aen_complete;
1198
1199 sc->mfi_aen_cm = cm;
1200
1201 mfi_enqueue_ready(cm);
1202 mfi_startio(sc);
1203
1204 return (0);
1205}
1206
1207static void
1208mfi_aen_complete(struct mfi_command *cm)
1209{
1210 struct mfi_frame_header *hdr;
1211 struct mfi_softc *sc;
1212 struct mfi_evt_detail *detail;
1213 struct mfi_aen *mfi_aen_entry;
1214 int seq = 0, aborted = 0;
1215
1216 sc = cm->cm_sc;
1217 hdr = &cm->cm_frame->header;
1218
1219 if (sc->mfi_aen_cm == NULL)
1220 return;
1221
1222 if (sc->mfi_aen_cm->cm_aen_abort || hdr->cmd_status == 0xff) {
1223 sc->mfi_aen_cm->cm_aen_abort = 0;
1224 aborted = 1;
1225 } else {
1226 sc->mfi_aen_triggered = 1;
1227 if (sc->mfi_poll_waiting)
1228 selwakeup(&sc->mfi_select);
1229 detail = cm->cm_data;
1230 mtx_unlock(&sc->mfi_io_lock);
1231 mfi_decode_evt(sc, detail);
1232 mtx_lock(&sc->mfi_io_lock);
1233 seq = detail->seq + 1;
1234 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1235 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1236 aen_link);
1237 psignal(mfi_aen_entry->p, SIGIO);
1238 free(mfi_aen_entry, M_MFIBUF);
1239 }
1240 }
1241
1242 free(cm->cm_data, M_MFIBUF);
1243 sc->mfi_aen_cm = NULL;
1244 wakeup(&sc->mfi_aen_cm);
1245 mfi_release_command(cm);
1246
1247 /* set it up again so the driver can catch more events */
1248 if (!aborted) {
1249 mtx_unlock(&sc->mfi_io_lock);
1250 mfi_aen_setup(sc, seq);
1251 mtx_lock(&sc->mfi_io_lock);
1252 }
1253}
1254
1255/* Only do one event for now so we can easily iterate through them */
1256#define MAX_EVENTS 1
1257static int
1258mfi_get_entry(struct mfi_softc *sc, int seq)
1259{
1260 struct mfi_command *cm;
1261 struct mfi_dcmd_frame *dcmd;
1262 struct mfi_evt_list *el;
1263 int error;
1264 int i;
1265 int size;
1266
1267 mtx_lock(&sc->mfi_io_lock);
1268 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1269 mtx_unlock(&sc->mfi_io_lock);
1270 return (EBUSY);
1271 }
1272 mtx_unlock(&sc->mfi_io_lock);
1273
1274 size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1275 * (MAX_EVENTS - 1);
1276 el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1277 if (el == NULL) {
1278 mtx_lock(&sc->mfi_io_lock);
1279 mfi_release_command(cm);
1280 mtx_unlock(&sc->mfi_io_lock);
1281 return (ENOMEM);
1282 }
1283
1284 dcmd = &cm->cm_frame->dcmd;
1285 bzero(dcmd->mbox, MFI_MBOX_SIZE);
1286 dcmd->header.cmd = MFI_CMD_DCMD;
1287 dcmd->header.timeout = 0;
1288 dcmd->header.data_len = size;
1289 dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1290 ((uint32_t *)&dcmd->mbox)[0] = seq;
1291 ((uint32_t *)&dcmd->mbox)[1] = MFI_EVT_LOCALE_ALL;
1292 cm->cm_sg = &dcmd->sgl;
1293 cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1294 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1295 cm->cm_data = el;
1296 cm->cm_len = size;
1297
1298 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1299 device_printf(sc->mfi_dev, "Event list buffer map failed\n");
1300 free(el, M_MFIBUF);
1301 mfi_release_command(cm);
1302 return (error);
1303 }
1304
1305 if ((error = mfi_polled_command(sc, cm)) != 0) {
1306 device_printf(sc->mfi_dev, "Failed to get event log entry\n");
1308 sc->mfi_max_io = (sc->mfi_total_sgl - 1) * PAGE_SIZE /
1307 sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1309 MFI_SECTOR_LEN;
1310 free(el, M_MFIBUF);
1311 mfi_release_command(cm);
1312 return (0);
1313 }
1314
1315 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1316 BUS_DMASYNC_POSTREAD);
1317 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1318
1319 for (i = 0; i < el->count; i++) {
1320 mfi_decode_evt(sc, &el->event[0]);
1321 }
1322
1323 mtx_lock(&sc->mfi_io_lock);
1324 free(cm->cm_data, M_MFIBUF);
1325 mfi_release_command(cm);
1326 mtx_unlock(&sc->mfi_io_lock);
1327 return (0);
1328}
1329
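/*
 * Query a single logical drive with MFI_DCMD_LD_GET_INFO and, on success,
 * hand the result to mfi_add_ld_complete() to create the mfid child device.
 */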
1330static int
1331mfi_add_ld(struct mfi_softc *sc, int id)
1332{
1333 struct mfi_command *cm;
1334 struct mfi_dcmd_frame *dcmd = NULL;
1335 struct mfi_ld_info *ld_info = NULL;
1336 int error;
1337
1338 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1339
1340 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1341 (void **)&ld_info, sizeof(*ld_info));
1342 if (error) {
1343 device_printf(sc->mfi_dev,
1344 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1345 if (ld_info)
1346 free(ld_info, M_MFIBUF);
1347 return (error);
1348 }
1349 cm->cm_flags = MFI_CMD_DATAIN;
1350 dcmd = &cm->cm_frame->dcmd;
1351 dcmd->mbox[0] = id;
1352 if (mfi_wait_command(sc, cm) != 0) {
1353 device_printf(sc->mfi_dev,
1354 "Failed to get logical drive: %d\n", id);
1355 free(ld_info, M_MFIBUF);
1356 return (0);
1357 }
1358
1359 mfi_add_ld_complete(cm);
1360 return (0);
1361}
1362
1363static void
1364mfi_add_ld_complete(struct mfi_command *cm)
1365{
1366 struct mfi_frame_header *hdr;
1367 struct mfi_ld_info *ld_info;
1368 struct mfi_softc *sc;
1369 struct mfi_ld *ld;
1370 device_t child;
1371
1372 sc = cm->cm_sc;
1373 hdr = &cm->cm_frame->header;
1374 ld_info = cm->cm_private;
1375
1376 if (hdr->cmd_status != MFI_STAT_OK) {
1377 free(ld_info, M_MFIBUF);
1378 mfi_release_command(cm);
1379 return;
1380 }
1381 mfi_release_command(cm);
1382
1383 ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1384 if (ld == NULL) {
1385 device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1386 free(ld_info, M_MFIBUF);
1387 return;
1388 }
1389
1390 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1391 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1392 free(ld, M_MFIBUF);
1393 free(ld_info, M_MFIBUF);
1394 return;
1395 }
1396
1397 ld->ld_id = ld_info->ld_config.properties.ld.target_id;
1398 ld->ld_disk = child;
1399 ld->ld_info = ld_info;
1400
1401 device_set_ivars(child, ld);
1402 device_set_desc(child, "MFI Logical Disk");
1403 mtx_unlock(&sc->mfi_io_lock);
1404 mtx_lock(&Giant);
1405 bus_generic_attach(sc->mfi_dev);
1406 mtx_unlock(&Giant);
1407 mtx_lock(&sc->mfi_io_lock);
1408}
1409
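/*
 * Convert the next queued bio into an MFI LD read or write command.
 * Returns NULL if no free command or no pending bio is available.
 */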
1410static struct mfi_command *
1411mfi_bio_command(struct mfi_softc *sc)
1412{
1413 struct mfi_io_frame *io;
1414 struct mfi_command *cm;
1415 struct bio *bio;
1416 int flags, blkcount;
1417
1418 if ((cm = mfi_dequeue_free(sc)) == NULL)
1419 return (NULL);
1420
1421 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1422 mfi_release_command(cm);
1423 return (NULL);
1424 }
1425
1426 io = &cm->cm_frame->io;
1427 switch (bio->bio_cmd & 0x03) {
1428 case BIO_READ:
1429 io->header.cmd = MFI_CMD_LD_READ;
1430 flags = MFI_CMD_DATAIN;
1431 break;
1432 case BIO_WRITE:
1433 io->header.cmd = MFI_CMD_LD_WRITE;
1434 flags = MFI_CMD_DATAOUT;
1435 break;
1436 default:
1437 panic("Invalid bio command");
1438 }
1439
1440 /* Use the constant MFI_SECTOR_LEN so the division below is by a compile-time constant */
1441 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1442 io->header.target_id = (uintptr_t)bio->bio_driver1;
1443 io->header.timeout = 0;
1444 io->header.flags = 0;
1445 io->header.sense_len = MFI_SENSE_LEN;
1446 io->header.data_len = blkcount;
1447 io->sense_addr_lo = cm->cm_sense_busaddr;
1448 io->sense_addr_hi = 0;
1449 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1450 io->lba_lo = bio->bio_pblkno & 0xffffffff;
1451 cm->cm_complete = mfi_bio_complete;
1452 cm->cm_private = bio;
1453 cm->cm_data = bio->bio_data;
1454 cm->cm_len = bio->bio_bcount;
1455 cm->cm_sg = &io->sgl;
1456 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1457 cm->cm_flags = flags;
1458
1459 return (cm);
1460}
1461
1462static void
1463mfi_bio_complete(struct mfi_command *cm)
1464{
1465 struct bio *bio;
1466 struct mfi_frame_header *hdr;
1467 struct mfi_softc *sc;
1468
1469 bio = cm->cm_private;
1470 hdr = &cm->cm_frame->header;
1471 sc = cm->cm_sc;
1472
1473 if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1474 bio->bio_flags |= BIO_ERROR;
1475 bio->bio_error = EIO;
1476 device_printf(sc->mfi_dev, "I/O error, status= %d "
1477 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1478 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1479 }
1480
1481 mfi_release_command(cm);
1482 mfi_disk_complete(bio);
1483}
1484
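/*
 * Pump the I/O queues: keep feeding ready commands (or commands freshly
 * built from queued bios) to the controller until there is no more work
 * or the queue is frozen for lack of resources.
 */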
1485void
1486mfi_startio(struct mfi_softc *sc)
1487{
1488 struct mfi_command *cm;
1489
1490 for (;;) {
1491 /* Don't bother if we're short on resources */
1492 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1493 break;
1494
1495 /* Try a command that has already been prepared */
1496 cm = mfi_dequeue_ready(sc);
1497
1498 /* Nope, so look for work on the bioq */
1499 if (cm == NULL)
1500 cm = mfi_bio_command(sc);
1501
1502 /* No work available, so exit */
1503 if (cm == NULL)
1504 break;
1505
1506 /* Send the command to the controller */
1507 if (mfi_mapcmd(sc, cm) != 0) {
1508 mfi_requeue_ready(cm);
1509 break;
1510 }
1511 }
1512}
1513
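/*
 * Map a command's data buffer for DMA, if it has one, and send it to the
 * controller.  S/G list construction and frame delivery for mapped
 * commands happen in mfi_data_cb() once the load completes; if the load
 * is deferred (EINPROGRESS) the queue is frozen until resources free up.
 */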
1514static int
1515mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1516{
1517 int error, polled;
1518
1519 if (cm->cm_data != NULL) {
1520 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1521 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1522 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1523 if (error == EINPROGRESS) {
1524 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1525 return (0);
1526 }
1527 } else {
1528 mfi_enqueue_busy(cm);
1529 error = mfi_send_frame(sc, cm);
1530 }
1531
1532 return (error);
1533}
1534
1535static void
1536mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1537{
1538 struct mfi_frame_header *hdr;
1539 struct mfi_command *cm;
1540 union mfi_sgl *sgl;
1541 struct mfi_softc *sc;
1542 int i, dir;
1543
1544 if (error)
1545 return;
1546
1547 cm = (struct mfi_command *)arg;
1548 sc = cm->cm_sc;
1549 hdr = &cm->cm_frame->header;
1550 sgl = cm->cm_sg;
1551
1552 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1553 for (i = 0; i < nsegs; i++) {
1554 sgl->sg32[i].addr = segs[i].ds_addr;
1555 sgl->sg32[i].len = segs[i].ds_len;
1556 }
1557 } else {
1558 for (i = 0; i < nsegs; i++) {
1559 sgl->sg64[i].addr = segs[i].ds_addr;
1560 sgl->sg64[i].len = segs[i].ds_len;
1561 }
1562 hdr->flags |= MFI_FRAME_SGL64;
1563 }
1564 hdr->sg_count = nsegs;
1565
1566 dir = 0;
1567 if (cm->cm_flags & MFI_CMD_DATAIN) {
1568 dir |= BUS_DMASYNC_PREREAD;
1569 hdr->flags |= MFI_FRAME_DIR_READ;
1570 }
1571 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1572 dir |= BUS_DMASYNC_PREWRITE;
1573 hdr->flags |= MFI_FRAME_DIR_WRITE;
1574 }
1575 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1576 cm->cm_flags |= MFI_CMD_MAPPED;
1577
1578 /*
1579 * Compute the number of frames beyond the first one in the compound
1580 * frame.  The first frame is always present, so subtracting 1 before
1581 * dividing avoids having to round the result up; e.g. a 200 byte
1582 * compound frame gives (200 - 1) / 64 = 3 extra frames.
1583 */
1308 MFI_SECTOR_LEN;
1309 free(el, M_MFIBUF);
1310 mfi_release_command(cm);
1311 return (0);
1312 }
1313
1314 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1315 BUS_DMASYNC_POSTREAD);
1316 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1317
1318 for (i = 0; i < el->count; i++) {
1319 mfi_decode_evt(sc, &el->event[0]);
1320 }
1321
1322 mtx_lock(&sc->mfi_io_lock);
1323 free(cm->cm_data, M_MFIBUF);
1324 mfi_release_command(cm);
1325 mtx_unlock(&sc->mfi_io_lock);
1326 return (0);
1327}
1328
1329static int
1330mfi_add_ld(struct mfi_softc *sc, int id)
1331{
1332 struct mfi_command *cm;
1333 struct mfi_dcmd_frame *dcmd = NULL;
1334 struct mfi_ld_info *ld_info = NULL;
1335 int error;
1336
1337 mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1338
1339 error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1340 (void **)&ld_info, sizeof(*ld_info));
1341 if (error) {
1342 device_printf(sc->mfi_dev,
1343 "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1344 if (ld_info)
1345 free(ld_info, M_MFIBUF);
1346 return (error);
1347 }
1348 cm->cm_flags = MFI_CMD_DATAIN;
1349 dcmd = &cm->cm_frame->dcmd;
1350 dcmd->mbox[0] = id;
1351 if (mfi_wait_command(sc, cm) != 0) {
1352 device_printf(sc->mfi_dev,
1353 "Failed to get logical drive: %d\n", id);
1354 free(ld_info, M_MFIBUF);
1355 return (0);
1356 }
1357
1358 mfi_add_ld_complete(cm);
1359 return (0);
1360}
1361
1362static void
1363mfi_add_ld_complete(struct mfi_command *cm)
1364{
1365 struct mfi_frame_header *hdr;
1366 struct mfi_ld_info *ld_info;
1367 struct mfi_softc *sc;
1368 struct mfi_ld *ld;
1369 device_t child;
1370
1371 sc = cm->cm_sc;
1372 hdr = &cm->cm_frame->header;
1373 ld_info = cm->cm_private;
1374
1375 if (hdr->cmd_status != MFI_STAT_OK) {
1376 free(ld_info, M_MFIBUF);
1377 mfi_release_command(cm);
1378 return;
1379 }
1380 mfi_release_command(cm);
1381
1382 ld = malloc(sizeof(struct mfi_ld), M_MFIBUF, M_NOWAIT|M_ZERO);
1383 if (ld == NULL) {
1384 device_printf(sc->mfi_dev, "Cannot allocate ld\n");
1385 free(ld_info, M_MFIBUF);
1386 return;
1387 }
1388
1389 if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1390 device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1391 free(ld, M_MFIBUF);
1392 free(ld_info, M_MFIBUF);
1393 return;
1394 }
1395
1396 ld->ld_id = ld_info->ld_config.properties.ld.target_id;
1397 ld->ld_disk = child;
1398 ld->ld_info = ld_info;
1399
1400 device_set_ivars(child, ld);
1401 device_set_desc(child, "MFI Logical Disk");
1402 mtx_unlock(&sc->mfi_io_lock);
1403 mtx_lock(&Giant);
1404 bus_generic_attach(sc->mfi_dev);
1405 mtx_unlock(&Giant);
1406 mtx_lock(&sc->mfi_io_lock);
1407}
1408
1409static struct mfi_command *
1410mfi_bio_command(struct mfi_softc *sc)
1411{
1412 struct mfi_io_frame *io;
1413 struct mfi_command *cm;
1414 struct bio *bio;
1415 int flags, blkcount;
1416
1417 if ((cm = mfi_dequeue_free(sc)) == NULL)
1418 return (NULL);
1419
1420 if ((bio = mfi_dequeue_bio(sc)) == NULL) {
1421 mfi_release_command(cm);
1422 return (NULL);
1423 }
1424
1425 io = &cm->cm_frame->io;
1426 switch (bio->bio_cmd & 0x03) {
1427 case BIO_READ:
1428 io->header.cmd = MFI_CMD_LD_READ;
1429 flags = MFI_CMD_DATAIN;
1430 break;
1431 case BIO_WRITE:
1432 io->header.cmd = MFI_CMD_LD_WRITE;
1433 flags = MFI_CMD_DATAOUT;
1434 break;
1435 default:
1436 panic("Invalid bio command");
1437 }
1438
1439 /* Use the constant MFI_SECTOR_LEN so the division below is by a compile-time constant */
1440 blkcount = (bio->bio_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1441 io->header.target_id = (uintptr_t)bio->bio_driver1;
1442 io->header.timeout = 0;
1443 io->header.flags = 0;
1444 io->header.sense_len = MFI_SENSE_LEN;
1445 io->header.data_len = blkcount;
1446 io->sense_addr_lo = cm->cm_sense_busaddr;
1447 io->sense_addr_hi = 0;
1448 io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
1449 io->lba_lo = bio->bio_pblkno & 0xffffffff;
1450 cm->cm_complete = mfi_bio_complete;
1451 cm->cm_private = bio;
1452 cm->cm_data = bio->bio_data;
1453 cm->cm_len = bio->bio_bcount;
1454 cm->cm_sg = &io->sgl;
1455 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1456 cm->cm_flags = flags;
1457
1458 return (cm);
1459}
1460
1461static void
1462mfi_bio_complete(struct mfi_command *cm)
1463{
1464 struct bio *bio;
1465 struct mfi_frame_header *hdr;
1466 struct mfi_softc *sc;
1467
1468 bio = cm->cm_private;
1469 hdr = &cm->cm_frame->header;
1470 sc = cm->cm_sc;
1471
1472 if ((hdr->cmd_status != 0) || (hdr->scsi_status != 0)) {
1473 bio->bio_flags |= BIO_ERROR;
1474 bio->bio_error = EIO;
1475 device_printf(sc->mfi_dev, "I/O error, status= %d "
1476 "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
1477 mfi_print_sense(cm->cm_sc, cm->cm_sense);
1478 }
1479
1480 mfi_release_command(cm);
1481 mfi_disk_complete(bio);
1482}
1483
1484void
1485mfi_startio(struct mfi_softc *sc)
1486{
1487 struct mfi_command *cm;
1488
1489 for (;;) {
1490 /* Don't bother if we're short on resources */
1491 if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1492 break;
1493
1494 /* Try a command that has already been prepared */
1495 cm = mfi_dequeue_ready(sc);
1496
1497 /* Nope, so look for work on the bioq */
1498 if (cm == NULL)
1499 cm = mfi_bio_command(sc);
1500
1501 /* No work available, so exit */
1502 if (cm == NULL)
1503 break;
1504
1505 /* Send the command to the controller */
1506 if (mfi_mapcmd(sc, cm) != 0) {
1507 mfi_requeue_ready(cm);
1508 break;
1509 }
1510 }
1511}
1512
1513static int
1514mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
1515{
1516 int error, polled;
1517
1518 if (cm->cm_data != NULL) {
1519 polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
1520 error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
1521 cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
1522 if (error == EINPROGRESS) {
1523 sc->mfi_flags |= MFI_FLAGS_QFRZN;
1524 return (0);
1525 }
1526 } else {
1527 mfi_enqueue_busy(cm);
1528 error = mfi_send_frame(sc, cm);
1529 }
1530
1531 return (error);
1532}
1533
1534static void
1535mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1536{
1537 struct mfi_frame_header *hdr;
1538 struct mfi_command *cm;
1539 union mfi_sgl *sgl;
1540 struct mfi_softc *sc;
1541 int i, dir;
1542
1543 if (error)
1544 return;
1545
1546 cm = (struct mfi_command *)arg;
1547 sc = cm->cm_sc;
1548 hdr = &cm->cm_frame->header;
1549 sgl = cm->cm_sg;
1550
1551 if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
1552 for (i = 0; i < nsegs; i++) {
1553 sgl->sg32[i].addr = segs[i].ds_addr;
1554 sgl->sg32[i].len = segs[i].ds_len;
1555 }
1556 } else {
1557 for (i = 0; i < nsegs; i++) {
1558 sgl->sg64[i].addr = segs[i].ds_addr;
1559 sgl->sg64[i].len = segs[i].ds_len;
1560 }
1561 hdr->flags |= MFI_FRAME_SGL64;
1562 }
1563 hdr->sg_count = nsegs;
1564
1565 dir = 0;
1566 if (cm->cm_flags & MFI_CMD_DATAIN) {
1567 dir |= BUS_DMASYNC_PREREAD;
1568 hdr->flags |= MFI_FRAME_DIR_READ;
1569 }
1570 if (cm->cm_flags & MFI_CMD_DATAOUT) {
1571 dir |= BUS_DMASYNC_PREWRITE;
1572 hdr->flags |= MFI_FRAME_DIR_WRITE;
1573 }
1574 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1575 cm->cm_flags |= MFI_CMD_MAPPED;
1576
1577 /*
1578 * Compute the number of frames beyond the first one in the compound
1579 * frame.  The first frame is always present, so subtracting 1 before
1580 * dividing avoids having to round the result up; e.g. a 200 byte
1581 * compound frame gives (200 - 1) / 64 = 3 extra frames.
1582 */
1584 cm->cm_total_frame_size += (sc->mfi_sgsize * nsegs);
1583 cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
1585 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1586
1587 /* The caller will take care of delivering polled commands */
1588 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1589 mfi_enqueue_busy(cm);
1590 mfi_send_frame(sc, cm);
1591 }
1592
1593 return;
1594}
1595
1596static int
1597mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1598{
1599
1600 /*
1601 * The bus address of the command is aligned on a 64 byte boundary,
1602 * leaving the least 6 bits as zero. For whatever reason, the
1603 * hardware wants the address shifted right by three, leaving just
1584 cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
1585
1586 /* The caller will take care of delivering polled commands */
1587 if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1588 mfi_enqueue_busy(cm);
1589 mfi_send_frame(sc, cm);
1590 }
1591
1592 return;
1593}
1594
1595static int
1596mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1597{
1598
1599 /*
1600 * The bus address of the command is aligned on a 64 byte boundary,
1601 * leaving the least 6 bits as zero. For whatever reason, the
1602 * hardware wants the address shifted right by three, leaving just
1604 * 3 zero bits. These three bits are then used to indicate how many
1605 * 64 byte frames beyond the first one are used in the command. The
1606 * extra frames are typically filled with S/G elements. The extra
1607 * frames must also be contiguous. Thus, a compound frame can be at
1608 * most 512 bytes long, allowing for up to 59 32-bit S/G elements or
1609 * 39 64-bit S/G elements for block I/O commands. This means that
1610 * I/O transfers of 256k and higher simply are not possible, which
1611 * is quite odd for such a modern adapter.
1603 * 3 zero bits. These three bits are then used as a prefetching
1604 * hint for the hardware to predict how many frames need to be
1605 * fetched across the bus. If a command has more than 8 frames
1606 * then the 3 bits are set to 0x7 and the firmware uses other
1607 * information in the command to determine the total amount to fetch.
1608 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
1609 * is enough for both 32bit and 64bit systems.
1612 */
1610 */
1611 if (cm->cm_extra_frames > 7)
1612 cm->cm_extra_frames = 7;
1613
1613 MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1614 cm->cm_extra_frames);
1615 return (0);
1616}
1617
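/*
 * Generic command completion: undo any DMA mapping, invoke the command's
 * completion callback (or wake up a sleeping mfi_wait_command() caller),
 * then unfreeze the queue and restart I/O.
 */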
1618static void
1619mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1620{
1621 int dir;
1622
1623 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1624 dir = 0;
1625 if (cm->cm_flags & MFI_CMD_DATAIN)
1626 dir |= BUS_DMASYNC_POSTREAD;
1627 if (cm->cm_flags & MFI_CMD_DATAOUT)
1628 dir |= BUS_DMASYNC_POSTWRITE;
1629
1630 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1631 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1632 cm->cm_flags &= ~MFI_CMD_MAPPED;
1633 }
1634
1635 if (cm->cm_complete != NULL)
1636 cm->cm_complete(cm);
1637 else
1638 wakeup(cm);
1639
1640 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1641 mfi_startio(sc);
1642}
1643
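/*
 * Abort an outstanding command, in practice the AEN command: build an
 * MFI_CMD_ABORT frame referencing the target, issue it polled, and wait
 * for mfi_aen_complete() to clear the AEN slot.
 */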
1644static int
1645mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1646{
1647 struct mfi_command *cm;
1648 struct mfi_abort_frame *abort;
1649
1650 mtx_lock(&sc->mfi_io_lock);
1651 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1652 mtx_unlock(&sc->mfi_io_lock);
1653 return (EBUSY);
1654 }
1655 mtx_unlock(&sc->mfi_io_lock);
1656
1657 abort = &cm->cm_frame->abort;
1658 abort->header.cmd = MFI_CMD_ABORT;
1659 abort->header.flags = 0;
1660 abort->abort_context = cm_abort->cm_frame->header.context;
1661 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1662 abort->abort_mfi_addr_hi = 0;
1663 cm->cm_data = NULL;
1664
1665 sc->mfi_aen_cm->cm_aen_abort = 1;
1666 mfi_mapcmd(sc, cm);
1667 mfi_polled_command(sc, cm);
1668 mtx_lock(&sc->mfi_io_lock);
1669 mfi_release_command(cm);
1670 mtx_unlock(&sc->mfi_io_lock);
1671
1672 while (sc->mfi_aen_cm != NULL) {
1673 tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
1674 }
1675
1676 return (0);
1677}
1678
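/*
 * Write a buffer to a logical drive with a single polled MFI_CMD_LD_WRITE.
 * Because it never sleeps or depends on interrupts, this is suitable for
 * contexts such as the kernel dump path.
 */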
1679int
1680mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1681{
1682 struct mfi_command *cm;
1683 struct mfi_io_frame *io;
1684 int error;
1685
1686 if ((cm = mfi_dequeue_free(sc)) == NULL)
1687 return (EBUSY);
1688
1689 io = &cm->cm_frame->io;
1690 io->header.cmd = MFI_CMD_LD_WRITE;
1691 io->header.target_id = id;
1692 io->header.timeout = 0;
1693 io->header.flags = 0;
1694 io->header.sense_len = MFI_SENSE_LEN;
1695 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1696 io->sense_addr_lo = cm->cm_sense_busaddr;
1697 io->sense_addr_hi = 0;
1698 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1699 io->lba_lo = lba & 0xffffffff;
1700 cm->cm_data = virt;
1701 cm->cm_len = len;
1702 cm->cm_sg = &io->sgl;
1703 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1704 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1705
1706 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1707 mfi_release_command(cm);
1708 return (error);
1709 }
1710
1711 error = mfi_polled_command(sc, cm);
1712 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1713 BUS_DMASYNC_POSTWRITE);
1714 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1715 mfi_release_command(cm);
1716
1717 return (error);
1718}
1719
1720static int
1721mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1722{
1723 struct mfi_softc *sc;
1724
1725 sc = dev->si_drv1;
1726 sc->mfi_flags |= MFI_FLAGS_OPEN;
1727
1728 return (0);
1729}
1730
1731static int
1732mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1733{
1734 struct mfi_softc *sc;
1735 struct mfi_aen *mfi_aen_entry;
1736
1737 sc = dev->si_drv1;
1738 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1739
1740 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1741 if (mfi_aen_entry->p == curproc) {
1742 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1743 aen_link);
1744 free(mfi_aen_entry, M_MFIBUF);
1745 }
1746 }
1747 return (0);
1748}
1749
1750static int
1751mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1752{
1753 struct mfi_softc *sc;
1754 union mfi_statrequest *ms;
1755 int error;
1756
1757 sc = dev->si_drv1;
1758 error = 0;
1759
1760 switch (cmd) {
1761 case MFIIO_STATS:
1762 ms = (union mfi_statrequest *)arg;
1763 switch (ms->ms_item) {
1764 case MFIQ_FREE:
1765 case MFIQ_BIO:
1766 case MFIQ_READY:
1767 case MFIQ_BUSY:
1768 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1769 sizeof(struct mfi_qstat));
1770 break;
1771 default:
1772 error = ENOIOCTL;
1773 break;
1774 }
1775 break;
1776 case 0xc1144d01: /* Firmware Linux ioctl shim */
1777 {
1778 devclass_t devclass;
1779 struct mfi_linux_ioc_packet l_ioc;
1780 int adapter;
1781
1782 devclass = devclass_find("mfi");
1783 if (devclass == NULL)
1784 return (ENOENT);
1785
1786 error = copyin(arg, &l_ioc, sizeof(l_ioc));
1787 if (error)
1788 return (error);
1789 adapter = l_ioc.lioc_adapter_no;
1790 sc = devclass_get_softc(devclass, adapter);
1791 if (sc == NULL)
1792 return (ENOENT);
1793 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1794 cmd, arg, flag, td));
1795 break;
1796 }
1797 case 0x400c4d03: /* AEN Linux ioctl shim */
1798 {
1799 devclass_t devclass;
1800 struct mfi_linux_ioc_aen l_aen;
1801 int adapter;
1802
1803 devclass = devclass_find("mfi");
1804 if (devclass == NULL)
1805 return (ENOENT);
1806
1807 error = copyin(arg, &l_aen, sizeof(l_aen));
1808 if (error)
1809 return (error);
1810 adapter = l_aen.laen_adapter_no;
1811 sc = devclass_get_softc(devclass, adapter);
1812 if (sc == NULL)
1813 return (ENOENT);
1814 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1815 cmd, arg, flag, td));
1816 break;
1817 }
1818 default:
1819 error = ENOENT;
1820 break;
1821 }
1822
1823 return (error);
1824}
1825
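/*
 * Back end for the Linux ioctl shim.  The user-supplied frame and
 * scatter/gather segments are copied in, the command is run polled, and
 * the data, sense information, and command status are copied back out.
 */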
1826static int
1827mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1828{
1829 struct mfi_softc *sc;
1830 struct mfi_linux_ioc_packet l_ioc;
1831 struct mfi_linux_ioc_aen l_aen;
1832 struct mfi_command *cm = NULL;
1833 struct mfi_aen *mfi_aen_entry;
1834 uint32_t *sense_ptr;
1835 uint32_t context;
1836 uint8_t *data = NULL, *temp;
1837 int i;
1838 int error;
1839
1840 sc = dev->si_drv1;
1841 error = 0;
1842 switch (cmd) {
1843 case 0xc1144d01: /* Firmware Linux ioctl shim */
1844 error = copyin(arg, &l_ioc, sizeof(l_ioc));
1845 if (error != 0)
1846 return (error);
1847
1848 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
1849 return (EINVAL);
1850 }
1851
1852 mtx_lock(&sc->mfi_io_lock);
1853 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1854 mtx_unlock(&sc->mfi_io_lock);
1855 return (EBUSY);
1856 }
1857 mtx_unlock(&sc->mfi_io_lock);
1858
1859 /*
1860 * save off original context since copying from user
1861 * will clobber some data
1862 */
1863 context = cm->cm_frame->header.context;
1864
1865 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
1866 l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
1867 cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
1868 cm->cm_sg =
1869 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
1870 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1871 | MFI_CMD_POLLED;
1872 cm->cm_len = cm->cm_frame->header.data_len;
1873 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1874 M_WAITOK | M_ZERO);
1875
1876 /* restore header context */
1877 cm->cm_frame->header.context = context;
1878
1879 temp = data;
1880 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1881 error = copyin(l_ioc.lioc_sgl[i].iov_base,
1882 temp,
1883 l_ioc.lioc_sgl[i].iov_len);
1884 if (error != 0) {
1885 device_printf(sc->mfi_dev,
1886 "Copy in failed");
1887 goto out;
1888 }
1889 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1890 }
1891
1892 if (l_ioc.lioc_sense_len) {
1893 sense_ptr =
1894 (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
1895 *sense_ptr = cm->cm_sense_busaddr;
1896 }
1897
1898 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1899 device_printf(sc->mfi_dev,
1900 "Controller info buffer map failed");
1901 goto out;
1902 }
1903
1904 if ((error = mfi_polled_command(sc, cm)) != 0) {
1905 device_printf(sc->mfi_dev,
1906 "Controller polled failed");
1907 goto out;
1908 }
1909
1910 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1911 BUS_DMASYNC_POSTREAD);
1912 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1913
1914 temp = data;
1915 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1916 error = copyout(temp,
1917 l_ioc.lioc_sgl[i].iov_base,
1918 l_ioc.lioc_sgl[i].iov_len);
1919 if (error != 0) {
1920 device_printf(sc->mfi_dev,
1921 "Copy out failed");
1922 goto out;
1923 }
1924 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1925 }
1926
1927 if (l_ioc.lioc_sense_len) {
1928 /* copy out sense */
1929 sense_ptr = (void *)
1930 &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
1931 temp = 0;
1932 temp += cm->cm_sense_busaddr;
1933 error = copyout(temp, sense_ptr,
1934 l_ioc.lioc_sense_len);
1935 if (error != 0) {
1936 device_printf(sc->mfi_dev,
1937 "Copy out failed");
1938 goto out;
1939 }
1940 }
1941
1942 error = copyout(&cm->cm_frame->header.cmd_status,
1943 &((struct mfi_linux_ioc_packet*)arg)
1944 ->lioc_frame.hdr.cmd_status,
1945 1);
1946 if (error != 0) {
1947 device_printf(sc->mfi_dev,
1948 "Copy out failed");
1949 goto out;
1950 }
1951
1952out:
1953 if (data)
1954 free(data, M_MFIBUF);
1955 if (cm) {
1956 mtx_lock(&sc->mfi_io_lock);
1957 mfi_release_command(cm);
1958 mtx_unlock(&sc->mfi_io_lock);
1959 }
1960
1961 return (error);
1962 case 0x400c4d03: /* AEN Linux ioctl shim */
1963 error = copyin(arg, &l_aen, sizeof(l_aen));
1964 if (error != 0)
1965 return (error);
1966 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
1967 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
1968 M_WAITOK);
1969 if (mfi_aen_entry != NULL) {
1970 mfi_aen_entry->p = curproc;
1971 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
1972 aen_link);
1973 }
1974 error = mfi_aen_register(sc, l_aen.laen_seq_num,
1975 l_aen.laen_class_locale);
1976
1977 if (error != 0) {
1978 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1979 aen_link);
1980 free(mfi_aen_entry, M_MFIBUF);
1981 }
1982
1983 return (error);
1984 default:
1985 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
1986 error = ENOENT;
1987 break;
1988 }
1989
1990 return (error);
1991}
1992
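/*
 * poll(2) handler for the control device: readable once an AEN has
 * triggered, POLLERR if no AEN command is outstanding at all.
 */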
1993static int
1994mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
1995{
1996 struct mfi_softc *sc;
1997 int revents = 0;
1998
1999 sc = dev->si_drv1;
2000
2001 if (poll_events & (POLLIN | POLLRDNORM)) {
2002 if (sc->mfi_aen_triggered != 0)
2003 revents |= poll_events & (POLLIN | POLLRDNORM);
2004 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2005 revents |= POLLERR;
2006 }
2007 }
2008
2009 if (revents == 0) {
2010 if (poll_events & (POLLIN | POLLRDNORM)) {
2011 sc->mfi_poll_waiting = 1;
2012 selrecord(td, &sc->mfi_select);
2013 sc->mfi_poll_waiting = 0;
2014 }
2015 }
2016
2017 return revents;
2018}
1614 MFI_WRITE4(sc, MFI_IQP, (cm->cm_frame_busaddr >> 3) |
1615 cm->cm_extra_frames);
1616 return (0);
1617}
1618
1619static void
1620mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
1621{
1622 int dir;
1623
1624 if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
1625 dir = 0;
1626 if (cm->cm_flags & MFI_CMD_DATAIN)
1627 dir |= BUS_DMASYNC_POSTREAD;
1628 if (cm->cm_flags & MFI_CMD_DATAOUT)
1629 dir |= BUS_DMASYNC_POSTWRITE;
1630
1631 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
1632 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1633 cm->cm_flags &= ~MFI_CMD_MAPPED;
1634 }
1635
1636 if (cm->cm_complete != NULL)
1637 cm->cm_complete(cm);
1638 else
1639 wakeup(cm);
1640
1641 sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1642 mfi_startio(sc);
1643}
1644
1645static int
1646mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
1647{
1648 struct mfi_command *cm;
1649 struct mfi_abort_frame *abort;
1650
1651 mtx_lock(&sc->mfi_io_lock);
1652 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1653 mtx_unlock(&sc->mfi_io_lock);
1654 return (EBUSY);
1655 }
1656 mtx_unlock(&sc->mfi_io_lock);
1657
1658 abort = &cm->cm_frame->abort;
1659 abort->header.cmd = MFI_CMD_ABORT;
1660 abort->header.flags = 0;
1661 abort->abort_context = cm_abort->cm_frame->header.context;
1662 abort->abort_mfi_addr_lo = cm_abort->cm_frame_busaddr;
1663 abort->abort_mfi_addr_hi = 0;
1664 cm->cm_data = NULL;
1665
1666 sc->mfi_aen_cm->cm_aen_abort = 1;
1667 mfi_mapcmd(sc, cm);
1668 mfi_polled_command(sc, cm);
1669 mtx_lock(&sc->mfi_io_lock);
1670 mfi_release_command(cm);
1671 mtx_unlock(&sc->mfi_io_lock);
1672
1673 while (sc->mfi_aen_cm != NULL) {
1674 tsleep(&sc->mfi_aen_cm, 0, "mfiabort", 5 * hz);
1675 }
1676
1677 return (0);
1678}
1679
1680int
1681mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt, int len)
1682{
1683 struct mfi_command *cm;
1684 struct mfi_io_frame *io;
1685 int error;
1686
1687 if ((cm = mfi_dequeue_free(sc)) == NULL)
1688 return (EBUSY);
1689
1690 io = &cm->cm_frame->io;
1691 io->header.cmd = MFI_CMD_LD_WRITE;
1692 io->header.target_id = id;
1693 io->header.timeout = 0;
1694 io->header.flags = 0;
1695 io->header.sense_len = MFI_SENSE_LEN;
1696 io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
1697 io->sense_addr_lo = cm->cm_sense_busaddr;
1698 io->sense_addr_hi = 0;
1699 io->lba_hi = (lba & 0xffffffff00000000) >> 32;
1700 io->lba_lo = lba & 0xffffffff;
1701 cm->cm_data = virt;
1702 cm->cm_len = len;
1703 cm->cm_sg = &io->sgl;
1704 cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
1705 cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
1706
1707 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1708 mfi_release_command(cm);
1709 return (error);
1710 }
1711
1712 error = mfi_polled_command(sc, cm);
1713 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1714 BUS_DMASYNC_POSTWRITE);
1715 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1716 mfi_release_command(cm);
1717
1718 return (error);
1719}
1720
1721static int
1722mfi_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1723{
1724 struct mfi_softc *sc;
1725
1726 sc = dev->si_drv1;
1727 sc->mfi_flags |= MFI_FLAGS_OPEN;
1728
1729 return (0);
1730}
1731
1732static int
1733mfi_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
1734{
1735 struct mfi_softc *sc;
1736 struct mfi_aen *mfi_aen_entry;
1737
1738 sc = dev->si_drv1;
1739 sc->mfi_flags &= ~MFI_FLAGS_OPEN;
1740
1741 TAILQ_FOREACH(mfi_aen_entry, &sc->mfi_aen_pids, aen_link) {
1742 if (mfi_aen_entry->p == curproc) {
1743 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1744 aen_link);
1745 free(mfi_aen_entry, M_MFIBUF);
1746 }
1747 }
1748 return (0);
1749}
1750
1751static int
1752mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1753{
1754 struct mfi_softc *sc;
1755 union mfi_statrequest *ms;
1756 int error;
1757
1758 sc = dev->si_drv1;
1759 error = 0;
1760
1761 switch (cmd) {
1762 case MFIIO_STATS:
1763 ms = (union mfi_statrequest *)arg;
1764 switch (ms->ms_item) {
1765 case MFIQ_FREE:
1766 case MFIQ_BIO:
1767 case MFIQ_READY:
1768 case MFIQ_BUSY:
1769 bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
1770 sizeof(struct mfi_qstat));
1771 break;
1772 default:
1773 error = ENOIOCTL;
1774 break;
1775 }
1776 break;
1777 case 0xc1144d01: /* Firmware Linux ioctl shim */
1778 {
1779 devclass_t devclass;
1780 struct mfi_linux_ioc_packet l_ioc;
1781 int adapter;
1782
1783 devclass = devclass_find("mfi");
1784 if (devclass == NULL)
1785 return (ENOENT);
1786
1787 error = copyin(arg, &l_ioc, sizeof(l_ioc));
1788 if (error)
1789 return (error);
1790 adapter = l_ioc.lioc_adapter_no;
1791 sc = devclass_get_softc(devclass, adapter);
1792 if (sc == NULL)
1793 return (ENOENT);
1794 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1795 cmd, arg, flag, td));
1796 break;
1797 }
1798 case 0x400c4d03: /* AEN Linux ioctl shim */
1799 {
1800 devclass_t devclass;
1801 struct mfi_linux_ioc_aen l_aen;
1802 int adapter;
1803
1804 devclass = devclass_find("mfi");
1805 if (devclass == NULL)
1806 return (ENOENT);
1807
1808 error = copyin(arg, &l_aen, sizeof(l_aen));
1809 if (error)
1810 return (error);
1811 adapter = l_aen.laen_adapter_no;
1812 sc = devclass_get_softc(devclass, adapter);
1813 if (sc == NULL)
1814 return (ENOENT);
1815 return (mfi_linux_ioctl_int(sc->mfi_cdev,
1816 cmd, arg, flag, td));
1817 break;
1818 }
1819 default:
1820 error = ENOENT;
1821 break;
1822 }
1823
1824 return (error);
1825}
1826
1827static int
1828mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1829{
1830 struct mfi_softc *sc;
1831 struct mfi_linux_ioc_packet l_ioc;
1832 struct mfi_linux_ioc_aen l_aen;
1833 struct mfi_command *cm = NULL;
1834 struct mfi_aen *mfi_aen_entry;
1835 uint32_t *sense_ptr;
1836 uint32_t context;
1837 uint8_t *data = NULL, *temp;
1838 int i;
1839 int error;
1840
1841 sc = dev->si_drv1;
1842 error = 0;
1843 switch (cmd) {
1844 case 0xc1144d01: /* Firmware Linux ioctl shim */
1845 error = copyin(arg, &l_ioc, sizeof(l_ioc));
1846 if (error != 0)
1847 return (error);
1848
1849 if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
1850 return (EINVAL);
1851 }
1852
1853 mtx_lock(&sc->mfi_io_lock);
1854 if ((cm = mfi_dequeue_free(sc)) == NULL) {
1855 mtx_unlock(&sc->mfi_io_lock);
1856 return (EBUSY);
1857 }
1858 mtx_unlock(&sc->mfi_io_lock);
1859
1860 /*
1861 * save off original context since copying from user
1862 * will clobber some data
1863 */
1864 context = cm->cm_frame->header.context;
1865
1866 bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
1867 l_ioc.lioc_sgl_off); /* Linux can do 2 frames ? */
1868 cm->cm_total_frame_size = l_ioc.lioc_sgl_off;
1869 cm->cm_sg =
1870 (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
1871 cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT
1872 | MFI_CMD_POLLED;
1873 cm->cm_len = cm->cm_frame->header.data_len;
1874 cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
1875 M_WAITOK | M_ZERO);
1876
1877 /* restore header context */
1878 cm->cm_frame->header.context = context;
1879
1880 temp = data;
1881 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1882 error = copyin(l_ioc.lioc_sgl[i].iov_base,
1883 temp,
1884 l_ioc.lioc_sgl[i].iov_len);
1885 if (error != 0) {
1886 device_printf(sc->mfi_dev,
1887 "Copy in failed");
1888 goto out;
1889 }
1890 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1891 }
1892
1893 if (l_ioc.lioc_sense_len) {
1894 sense_ptr =
1895 (void *)&cm->cm_frame->bytes[l_ioc.lioc_sense_off];
1896 *sense_ptr = cm->cm_sense_busaddr;
1897 }
1898
1899 if ((error = mfi_mapcmd(sc, cm)) != 0) {
1900 device_printf(sc->mfi_dev,
1901 "Controller info buffer map failed");
1902 goto out;
1903 }
1904
1905 if ((error = mfi_polled_command(sc, cm)) != 0) {
1906 device_printf(sc->mfi_dev,
1907 "Controller polled failed");
1908 goto out;
1909 }
1910
1911 bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1912 BUS_DMASYNC_POSTREAD);
1913 bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1914
1915 temp = data;
1916 for (i = 0; i < l_ioc.lioc_sge_count; i++) {
1917 error = copyout(temp,
1918 l_ioc.lioc_sgl[i].iov_base,
1919 l_ioc.lioc_sgl[i].iov_len);
1920 if (error != 0) {
1921 device_printf(sc->mfi_dev,
1922 "Copy out failed");
1923 goto out;
1924 }
1925 temp = &temp[l_ioc.lioc_sgl[i].iov_len];
1926 }
1927
1928 if (l_ioc.lioc_sense_len) {
1929 /* copy out sense */
1930 sense_ptr = (void *)
1931 &l_ioc.lioc_frame.raw[l_ioc.lioc_sense_off];
1932 temp = 0;
1933 temp += cm->cm_sense_busaddr;
1934 error = copyout(temp, sense_ptr,
1935 l_ioc.lioc_sense_len);
1936 if (error != 0) {
1937 device_printf(sc->mfi_dev,
1938 "Copy out failed");
1939 goto out;
1940 }
1941 }
1942
1943 error = copyout(&cm->cm_frame->header.cmd_status,
1944 &((struct mfi_linux_ioc_packet*)arg)
1945 ->lioc_frame.hdr.cmd_status,
1946 1);
1947 if (error != 0) {
1948 device_printf(sc->mfi_dev,
1949 "Copy out failed");
1950 goto out;
1951 }
1952
1953out:
1954 if (data)
1955 free(data, M_MFIBUF);
1956 if (cm) {
1957 mtx_lock(&sc->mfi_io_lock);
1958 mfi_release_command(cm);
1959 mtx_unlock(&sc->mfi_io_lock);
1960 }
1961
1962 return (error);
1963 case 0x400c4d03: /* AEN Linux ioctl shim */
1964 error = copyin(arg, &l_aen, sizeof(l_aen));
1965 if (error != 0)
1966 return (error);
1967 printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
1968 mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
1969 M_WAITOK);
1970 if (mfi_aen_entry != NULL) {
1971 mfi_aen_entry->p = curproc;
1972 TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
1973 aen_link);
1974 }
1975 error = mfi_aen_register(sc, l_aen.laen_seq_num,
1976 l_aen.laen_class_locale);
1977
1978 if (error != 0) {
1979 TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1980 aen_link);
1981 free(mfi_aen_entry, M_MFIBUF);
1982 }
1983
1984 return (error);
1985 default:
1986 device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
1987 error = ENOENT;
1988 break;
1989 }
1990
1991 return (error);
1992}
1993
1994static int
1995mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
1996{
1997 struct mfi_softc *sc;
1998 int revents = 0;
1999
2000 sc = dev->si_drv1;
2001
2002 if (poll_events & (POLLIN | POLLRDNORM)) {
2003 if (sc->mfi_aen_triggered != 0)
2004 revents |= poll_events & (POLLIN | POLLRDNORM);
2005 if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
2006 revents |= POLLERR;
2007 }
2008 }
2009
2010 if (revents == 0) {
2011 if (poll_events & (POLLIN | POLLRDNORM)) {
2012 sc->mfi_poll_waiting = 1;
2013 selrecord(td, &sc->mfi_select);
2014 sc->mfi_poll_waiting = 0;
2015 }
2016 }
2017
2018 return revents;
2019}