Deleted Added
full compact
mrsas.c (272735) mrsas.c (272737)
1/*
2 * Copyright (c) 2014, LSI Corp.
3 * All rights reserved.
4 * Author: Marian Choy
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies,either expressed or implied, of the FreeBSD Project.
37 *
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
41 *
42 */
43
44#include <sys/cdefs.h>
1/*
2 * Copyright (c) 2014, LSI Corp.
3 * All rights reserved.
4 * Author: Marian Choy
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies,either expressed or implied, of the FreeBSD Project.
37 *
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
41 *
42 */
43
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/dev/mrsas/mrsas.c 272735 2014-10-08 08:48:18Z kadesai $");
45__FBSDID("$FreeBSD: head/sys/dev/mrsas/mrsas.c 272737 2014-10-08 09:19:35Z kadesai $");
46
47#include <dev/mrsas/mrsas.h>
48#include <dev/mrsas/mrsas_ioctl.h>
49
50#include <cam/cam.h>
51#include <cam/cam_ccb.h>
52
53#include <sys/sysctl.h>
54#include <sys/types.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58
59/*
60 * Function prototypes
61 */
62static d_open_t mrsas_open;
63static d_close_t mrsas_close;
64static d_read_t mrsas_read;
65static d_write_t mrsas_write;
66static d_ioctl_t mrsas_ioctl;
67
46
47#include <dev/mrsas/mrsas.h>
48#include <dev/mrsas/mrsas_ioctl.h>
49
50#include <cam/cam.h>
51#include <cam/cam_ccb.h>
52
53#include <sys/sysctl.h>
54#include <sys/types.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58
59/*
60 * Function prototypes
61 */
62static d_open_t mrsas_open;
63static d_close_t mrsas_close;
64static d_read_t mrsas_read;
65static d_write_t mrsas_write;
66static d_ioctl_t mrsas_ioctl;
67
68static struct mrsas_mgmt_info mrsas_mgmt_info;
68static struct mrsas_ident *mrsas_find_ident(device_t);
69static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70static void mrsas_flush_cache(struct mrsas_softc *sc);
71static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72static void mrsas_ocr_thread(void *arg);
73static int mrsas_get_map_info(struct mrsas_softc *sc);
74static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75static int mrsas_sync_map_info(struct mrsas_softc *sc);
76static int mrsas_get_pd_list(struct mrsas_softc *sc);
77static int mrsas_get_ld_list(struct mrsas_softc *sc);
78static int mrsas_setup_irq(struct mrsas_softc *sc);
79static int mrsas_alloc_mem(struct mrsas_softc *sc);
80static int mrsas_init_fw(struct mrsas_softc *sc);
81static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82static int mrsas_complete_cmd(struct mrsas_softc *sc);
83static int mrsas_clear_intr(struct mrsas_softc *sc);
84static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92int mrsas_init_adapter(struct mrsas_softc *sc);
93int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96int mrsas_ioc_init(struct mrsas_softc *sc);
97int mrsas_bus_scan(struct mrsas_softc *sc);
98int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100int mrsas_reset_ctrl(struct mrsas_softc *sc);
101int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
105 int size);
106void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110void mrsas_disable_intr(struct mrsas_softc *sc);
111void mrsas_enable_intr(struct mrsas_softc *sc);
112void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113void mrsas_free_mem(struct mrsas_softc *sc);
114void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115void mrsas_isr(void *arg);
116void mrsas_teardown_intr(struct mrsas_softc *sc);
117void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118void mrsas_kill_hba (struct mrsas_softc *sc);
119void mrsas_aen_handler(struct mrsas_softc *sc);
120void mrsas_write_reg(struct mrsas_softc *sc, int offset,
121 u_int32_t value);
122void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
128 u_int8_t extStatus);
129struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
132
133extern int mrsas_cam_attach(struct mrsas_softc *sc);
134extern void mrsas_cam_detach(struct mrsas_softc *sc);
135extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
69static struct mrsas_ident *mrsas_find_ident(device_t);
70static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
71static void mrsas_flush_cache(struct mrsas_softc *sc);
72static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
73static void mrsas_ocr_thread(void *arg);
74static int mrsas_get_map_info(struct mrsas_softc *sc);
75static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
76static int mrsas_sync_map_info(struct mrsas_softc *sc);
77static int mrsas_get_pd_list(struct mrsas_softc *sc);
78static int mrsas_get_ld_list(struct mrsas_softc *sc);
79static int mrsas_setup_irq(struct mrsas_softc *sc);
80static int mrsas_alloc_mem(struct mrsas_softc *sc);
81static int mrsas_init_fw(struct mrsas_softc *sc);
82static int mrsas_setup_raidmap(struct mrsas_softc *sc);
83static int mrsas_complete_cmd(struct mrsas_softc *sc);
84static int mrsas_clear_intr(struct mrsas_softc *sc);
85static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
86 struct mrsas_ctrl_info *ctrl_info);
87static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
88 struct mrsas_mfi_cmd *cmd_to_abort);
89u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
90u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
91 struct mrsas_mfi_cmd *mfi_cmd);
92int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
93int mrsas_init_adapter(struct mrsas_softc *sc);
94int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
95int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
96int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
97int mrsas_ioc_init(struct mrsas_softc *sc);
98int mrsas_bus_scan(struct mrsas_softc *sc);
99int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
101int mrsas_reset_ctrl(struct mrsas_softc *sc);
102int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
103int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
104 struct mrsas_mfi_cmd *cmd);
105int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
106 int size);
107void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
108void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
111void mrsas_disable_intr(struct mrsas_softc *sc);
112void mrsas_enable_intr(struct mrsas_softc *sc);
113void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
114void mrsas_free_mem(struct mrsas_softc *sc);
115void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
116void mrsas_isr(void *arg);
117void mrsas_teardown_intr(struct mrsas_softc *sc);
118void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
119void mrsas_kill_hba (struct mrsas_softc *sc);
120void mrsas_aen_handler(struct mrsas_softc *sc);
121void mrsas_write_reg(struct mrsas_softc *sc, int offset,
122 u_int32_t value);
123void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
124 u_int32_t req_desc_hi);
125void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
126void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
127 struct mrsas_mfi_cmd *cmd, u_int8_t status);
128void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
129 u_int8_t extStatus);
130struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
131MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
132 struct mrsas_mfi_cmd *cmd);
133
134extern int mrsas_cam_attach(struct mrsas_softc *sc);
135extern void mrsas_cam_detach(struct mrsas_softc *sc);
136extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
137extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
138extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
139extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
140extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
141extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
143extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
144extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145extern void mrsas_xpt_release(struct mrsas_softc *sc);
146extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
147 u_int16_t index);
148extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
152
153
154/**
155 * PCI device struct and table
156 *
157 */
/*
 * PCI identity tuple used to match supported controllers.  A subvendor or
 * subdevice value of 0xffff acts as a wildcard in mrsas_find_ident().
 */
158typedef struct mrsas_ident {
159 uint16_t vendor;
160 uint16_t device;
161 uint16_t subvendor;
162 uint16_t subdevice;
163 const char *desc;
164} MRSAS_CTLR_ID;
165
/* Table of controllers this driver attaches to, scanned by mrsas_find_ident(). */
166MRSAS_CTLR_ID device_table[] = {
167 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
/* All-zero sentinel terminates the table. */
170 {0, 0, 0, 0, NULL}
171};
172
173/**
174 * Character device entry points
175 *
176 */
/*
 * Entry points for the /dev character device.  The ioctl path is the
 * management interface; open/close/read/write are effectively no-ops
 * (see the trivial handlers below).
 */
177static struct cdevsw mrsas_cdevsw = {
178 .d_version = D_VERSION,
179 .d_open = mrsas_open,
180 .d_close = mrsas_close,
181 .d_read = mrsas_read,
182 .d_write = mrsas_write,
183 .d_ioctl = mrsas_ioctl,
184 .d_name = "mrsas",
185};
186
187MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
188
189/**
190 * In the cdevsw routines, we find our softc by using the si_drv1 member
191 * of struct cdev. We set this variable to point to our softc in our
192 * attach routine when we create the /dev entry.
193 */
194int
195mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
196{
197 struct mrsas_softc *sc;
198
199 sc = dev->si_drv1;
200 return (0);
201}
202
203int
204mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
205{
206 struct mrsas_softc *sc;
207
208 sc = dev->si_drv1;
209 return (0);
210}
211
212int
213mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
214{
215 struct mrsas_softc *sc;
216
217 sc = dev->si_drv1;
218 return (0);
219}
220int
221mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
222{
223 struct mrsas_softc *sc;
224
225 sc = dev->si_drv1;
226 return (0);
227}
228
229/**
230 * Register Read/Write Functions
231 *
232 */
233void
234mrsas_write_reg(struct mrsas_softc *sc, int offset,
235 u_int32_t value)
236{
237 bus_space_tag_t bus_tag = sc->bus_tag;
238 bus_space_handle_t bus_handle = sc->bus_handle;
239
240 bus_space_write_4(bus_tag, bus_handle, offset, value);
241}
242
243u_int32_t
244mrsas_read_reg(struct mrsas_softc *sc, int offset)
245{
246 bus_space_tag_t bus_tag = sc->bus_tag;
247 bus_space_handle_t bus_handle = sc->bus_handle;
248
249 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
250}
251
252
253/**
254 * Interrupt Disable/Enable/Clear Functions
255 *
256 */
257void mrsas_disable_intr(struct mrsas_softc *sc)
258{
259 u_int32_t mask = 0xFFFFFFFF;
260 u_int32_t status;
261
262 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
263 /* Dummy read to force pci flush */
264 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
265}
266
267void mrsas_enable_intr(struct mrsas_softc *sc)
268{
269 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
270 u_int32_t status;
271
272 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
273 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
274
275 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
276 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
277}
278
279static int mrsas_clear_intr(struct mrsas_softc *sc)
280{
281 u_int32_t status, fw_status, fw_state;
282
283 /* Read received interrupt */
284 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
285
286 /* If FW state change interrupt is received, write to it again to clear */
287 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
288 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
289 outbound_scratch_pad));
290 fw_state = fw_status & MFI_STATE_MASK;
291 if (fw_state == MFI_STATE_FAULT) {
292 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
293 if(sc->ocr_thread_active)
294 wakeup(&sc->ocr_chan);
295 }
296 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
297 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
298 return(1);
299 }
300
301 /* Not our interrupt, so just return */
302 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
303 return(0);
304
305 /* We got a reply interrupt */
306 return(1);
307}
308
309/**
310 * PCI Support Functions
311 *
312 */
313static struct mrsas_ident * mrsas_find_ident(device_t dev)
314{
315 struct mrsas_ident *pci_device;
316
317 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
318 {
319 if ((pci_device->vendor == pci_get_vendor(dev)) &&
320 (pci_device->device == pci_get_device(dev)) &&
321 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
322 (pci_device->subvendor == 0xffff)) &&
323 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
324 (pci_device->subdevice == 0xffff)))
325 return (pci_device);
326 }
327 return (NULL);
328}
329
330static int mrsas_probe(device_t dev)
331{
332 static u_int8_t first_ctrl = 1;
333 struct mrsas_ident *id;
334
335 if ((id = mrsas_find_ident(dev)) != NULL) {
336 if (first_ctrl) {
337 printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
338 first_ctrl = 0;
339 }
340 device_set_desc(dev, id->desc);
341 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
342 return (-30);
343 }
344 return (ENXIO);
345}
346
347/**
348 * mrsas_setup_sysctl: setup sysctl values for mrsas
349 * input: Adapter instance soft state
350 *
351 * Setup sysctl entries for mrsas driver.
352 */
353static void
354mrsas_setup_sysctl(struct mrsas_softc *sc)
355{
356 struct sysctl_ctx_list *sysctl_ctx = NULL;
357 struct sysctl_oid *sysctl_tree = NULL;
358 char tmpstr[80], tmpstr2[80];
359
360 /*
361 * Setup the sysctl variable so the user can change the debug level
362 * on the fly.
363 */
364 snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
365 device_get_unit(sc->mrsas_dev));
366 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
367
368 sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
369 if (sysctl_ctx != NULL)
370 sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
371
372 if (sysctl_tree == NULL) {
373 sysctl_ctx_init(&sc->sysctl_ctx);
374 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
375 SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
376 CTLFLAG_RD, 0, tmpstr);
377 if (sc->sysctl_tree == NULL)
378 return;
379 sysctl_ctx = &sc->sysctl_ctx;
380 sysctl_tree = sc->sysctl_tree;
381 }
382 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
383 OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
384 "Disable the use of OCR");
385
386 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
387 OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
388 strlen(MRSAS_VERSION), "driver version");
389
390 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
391 OID_AUTO, "reset_count", CTLFLAG_RD,
392 &sc->reset_count, 0, "number of ocr from start of the day");
393
394 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
395 OID_AUTO, "fw_outstanding", CTLFLAG_RD,
396 &sc->fw_outstanding, 0, "FW outstanding commands");
397
398 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
399 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
400 &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
401
402 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
403 OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
404 "Driver debug level");
405
406 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
407 OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
408 0, "Driver IO timeout value in mili-second.");
409
410 SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
411 OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
412 &sc->mrsas_fw_fault_check_delay,
413 0, "FW fault check thread delay in seconds. <default is 1 sec>");
414
415 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416 OID_AUTO, "reset_in_progress", CTLFLAG_RD,
417 &sc->reset_in_progress, 0, "ocr in progress status");
418
419}
420
421/**
422 * mrsas_get_tunables: get tunable parameters.
423 * input: Adapter instance soft state
424 *
425 * Get tunable parameters. This will help to debug driver at boot time.
426 */
427static void
428mrsas_get_tunables(struct mrsas_softc *sc)
429{
430 char tmpstr[80];
431
432 /* XXX default to some debugging for now */
433 sc->mrsas_debug = MRSAS_FAULT;
434 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
435 sc->mrsas_fw_fault_check_delay = 1;
436 sc->reset_count = 0;
437 sc->reset_in_progress = 0;
438
439 /*
440 * Grab the global variables.
441 */
442 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
443
444 /* Grab the unit-instance variables */
445 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
446 device_get_unit(sc->mrsas_dev));
447 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
448}
449
450/**
451 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
452 * Used to get sequence number at driver load time.
453 * input: Adapter soft state
454 *
455 * Allocates DMAable memory for the event log info internal command.
456 */
457int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
458{
459 int el_info_size;
460
461 /* Allocate get event log info command */
462 el_info_size = sizeof(struct mrsas_evt_log_info);
463 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
464 1, 0, // algnmnt, boundary
465 BUS_SPACE_MAXADDR_32BIT,// lowaddr
466 BUS_SPACE_MAXADDR, // highaddr
467 NULL, NULL, // filter, filterarg
468 el_info_size, // maxsize
469 1, // msegments
470 el_info_size, // maxsegsize
471 BUS_DMA_ALLOCNOW, // flags
472 NULL, NULL, // lockfunc, lockarg
473 &sc->el_info_tag)) {
474 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
475 return (ENOMEM);
476 }
477 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
478 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
479 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
480 return (ENOMEM);
481 }
482 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
483 sc->el_info_mem, el_info_size, mrsas_addr_cb,
484 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
485 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
486 return (ENOMEM);
487 }
488
489 memset(sc->el_info_mem, 0, el_info_size);
490 return (0);
491}
492
493/**
494 * mrsas_free_evt_info_cmd: Free memory for Event log info command
495 * input: Adapter soft state
496 *
497 * Deallocates memory for the event log info internal command.
498 */
499void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
500{
501 if (sc->el_info_phys_addr)
502 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
503 if (sc->el_info_mem != NULL)
504 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
505 if (sc->el_info_tag != NULL)
506 bus_dma_tag_destroy(sc->el_info_tag);
507}
508
509/**
510 * mrsas_get_seq_num: Get latest event sequence number
511 * @sc: Adapter soft state
512 * @eli: Firmware event log sequence number information.
513 * Firmware maintains a log of all events in a non-volatile area.
514 * Driver get the sequence number using DCMD
515 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
516 */
517
518static int
519mrsas_get_seq_num(struct mrsas_softc *sc,
520 struct mrsas_evt_log_info *eli)
521{
522 struct mrsas_mfi_cmd *cmd;
523 struct mrsas_dcmd_frame *dcmd;
524
525 cmd = mrsas_get_mfi_cmd(sc);
526
527 if (!cmd) {
528 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
529 return -ENOMEM;
530 }
531
532 dcmd = &cmd->frame->dcmd;
533
534 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
535 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
536 mrsas_release_mfi_cmd(cmd);
537 return -ENOMEM;
538 }
539
540 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
541
542 dcmd->cmd = MFI_CMD_DCMD;
543 dcmd->cmd_status = 0x0;
544 dcmd->sge_count = 1;
545 dcmd->flags = MFI_FRAME_DIR_READ;
546 dcmd->timeout = 0;
547 dcmd->pad_0 = 0;
548 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
549 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
550 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
551 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
552
553 mrsas_issue_blocked_cmd(sc, cmd);
554
555 /*
556 * Copy the data back into callers buffer
557 */
558 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
559 mrsas_free_evt_log_info_cmd(sc);
560 mrsas_release_mfi_cmd(cmd);
561
562 return 0;
563}
564
565
566/**
567 * mrsas_register_aen: Register for asynchronous event notification
568 * @sc: Adapter soft state
569 * @seq_num: Starting sequence number
570 * @class_locale: Class of the event
571 * This function subscribes for events beyond the @seq_num
572 * and type @class_locale.
573 *
574 * */
575static int
576mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
577 u_int32_t class_locale_word)
578{
579 int ret_val;
580 struct mrsas_mfi_cmd *cmd;
581 struct mrsas_dcmd_frame *dcmd;
582 union mrsas_evt_class_locale curr_aen;
583 union mrsas_evt_class_locale prev_aen;
584
585/*
586 * If there an AEN pending already (aen_cmd), check if the
587 * class_locale of that pending AEN is inclusive of the new
588 * AEN request we currently have. If it is, then we don't have
589 * to do anything. In other words, whichever events the current
590 * AEN request is subscribing to, have already been subscribed
591 * to.
592 * If the old_cmd is _not_ inclusive, then we have to abort
593 * that command, form a class_locale that is superset of both
594 * old and current and re-issue to the FW
595 * */
596
597 curr_aen.word = class_locale_word;
598
599 if (sc->aen_cmd) {
600
601 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
602
603/*
604 * A class whose enum value is smaller is inclusive of all
605 * higher values. If a PROGRESS (= -1) was previously
606 * registered, then a new registration requests for higher
607 * classes need not be sent to FW. They are automatically
608 * included.
609 * Locale numbers don't have such hierarchy. They are bitmap values
610 */
611 if ((prev_aen.members.class <= curr_aen.members.class) &&
612 !((prev_aen.members.locale & curr_aen.members.locale) ^
613 curr_aen.members.locale)) {
614 /*
615 * Previously issued event registration includes
616 * current request. Nothing to do.
617 */
618 return 0;
619 } else {
620 curr_aen.members.locale |= prev_aen.members.locale;
621
622 if (prev_aen.members.class < curr_aen.members.class)
623 curr_aen.members.class = prev_aen.members.class;
624
625 sc->aen_cmd->abort_aen = 1;
626 ret_val = mrsas_issue_blocked_abort_cmd(sc,
627 sc->aen_cmd);
628
629 if (ret_val) {
630 printf("mrsas: Failed to abort "
631 "previous AEN command\n");
632 return ret_val;
633 }
634 }
635 }
636
637 cmd = mrsas_get_mfi_cmd(sc);
638
639 if (!cmd)
640 return -ENOMEM;
641
642 dcmd = &cmd->frame->dcmd;
643
644 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
645
646/*
647 * Prepare DCMD for aen registration
648 */
649 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
650
651 dcmd->cmd = MFI_CMD_DCMD;
652 dcmd->cmd_status = 0x0;
653 dcmd->sge_count = 1;
654 dcmd->flags = MFI_FRAME_DIR_READ;
655 dcmd->timeout = 0;
656 dcmd->pad_0 = 0;
657 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
658 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
659 dcmd->mbox.w[0] = seq_num;
142extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
143extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
144extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL *map);
145extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
146extern void mrsas_xpt_release(struct mrsas_softc *sc);
147extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
148 u_int16_t index);
149extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
150static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
151static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
152SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
153
154
155/**
156 * PCI device struct and table
157 *
158 */
159typedef struct mrsas_ident {
160 uint16_t vendor;
161 uint16_t device;
162 uint16_t subvendor;
163 uint16_t subdevice;
164 const char *desc;
165} MRSAS_CTLR_ID;
166
167MRSAS_CTLR_ID device_table[] = {
168 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
169 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
170 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
171 {0, 0, 0, 0, NULL}
172};
173
174/**
175 * Character device entry points
176 *
177 */
178static struct cdevsw mrsas_cdevsw = {
179 .d_version = D_VERSION,
180 .d_open = mrsas_open,
181 .d_close = mrsas_close,
182 .d_read = mrsas_read,
183 .d_write = mrsas_write,
184 .d_ioctl = mrsas_ioctl,
185 .d_name = "mrsas",
186};
187
188MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
189
190/**
191 * In the cdevsw routines, we find our softc by using the si_drv1 member
192 * of struct cdev. We set this variable to point to our softc in our
193 * attach routine when we create the /dev entry.
194 */
195int
196mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
197{
198 struct mrsas_softc *sc;
199
200 sc = dev->si_drv1;
201 return (0);
202}
203
204int
205mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
206{
207 struct mrsas_softc *sc;
208
209 sc = dev->si_drv1;
210 return (0);
211}
212
213int
214mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
215{
216 struct mrsas_softc *sc;
217
218 sc = dev->si_drv1;
219 return (0);
220}
221int
222mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
223{
224 struct mrsas_softc *sc;
225
226 sc = dev->si_drv1;
227 return (0);
228}
229
230/**
231 * Register Read/Write Functions
232 *
233 */
234void
235mrsas_write_reg(struct mrsas_softc *sc, int offset,
236 u_int32_t value)
237{
238 bus_space_tag_t bus_tag = sc->bus_tag;
239 bus_space_handle_t bus_handle = sc->bus_handle;
240
241 bus_space_write_4(bus_tag, bus_handle, offset, value);
242}
243
244u_int32_t
245mrsas_read_reg(struct mrsas_softc *sc, int offset)
246{
247 bus_space_tag_t bus_tag = sc->bus_tag;
248 bus_space_handle_t bus_handle = sc->bus_handle;
249
250 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
251}
252
253
254/**
255 * Interrupt Disable/Enable/Clear Functions
256 *
257 */
258void mrsas_disable_intr(struct mrsas_softc *sc)
259{
260 u_int32_t mask = 0xFFFFFFFF;
261 u_int32_t status;
262
263 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
264 /* Dummy read to force pci flush */
265 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
266}
267
268void mrsas_enable_intr(struct mrsas_softc *sc)
269{
270 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
271 u_int32_t status;
272
273 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
274 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
275
276 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
277 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
278}
279
280static int mrsas_clear_intr(struct mrsas_softc *sc)
281{
282 u_int32_t status, fw_status, fw_state;
283
284 /* Read received interrupt */
285 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
286
287 /* If FW state change interrupt is received, write to it again to clear */
288 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
289 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
290 outbound_scratch_pad));
291 fw_state = fw_status & MFI_STATE_MASK;
292 if (fw_state == MFI_STATE_FAULT) {
293 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
294 if(sc->ocr_thread_active)
295 wakeup(&sc->ocr_chan);
296 }
297 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
298 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
299 return(1);
300 }
301
302 /* Not our interrupt, so just return */
303 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
304 return(0);
305
306 /* We got a reply interrupt */
307 return(1);
308}
309
310/**
311 * PCI Support Functions
312 *
313 */
314static struct mrsas_ident * mrsas_find_ident(device_t dev)
315{
316 struct mrsas_ident *pci_device;
317
318 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
319 {
320 if ((pci_device->vendor == pci_get_vendor(dev)) &&
321 (pci_device->device == pci_get_device(dev)) &&
322 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
323 (pci_device->subvendor == 0xffff)) &&
324 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
325 (pci_device->subdevice == 0xffff)))
326 return (pci_device);
327 }
328 return (NULL);
329}
330
331static int mrsas_probe(device_t dev)
332{
333 static u_int8_t first_ctrl = 1;
334 struct mrsas_ident *id;
335
336 if ((id = mrsas_find_ident(dev)) != NULL) {
337 if (first_ctrl) {
338 printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
339 first_ctrl = 0;
340 }
341 device_set_desc(dev, id->desc);
342 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
343 return (-30);
344 }
345 return (ENXIO);
346}
347
348/**
349 * mrsas_setup_sysctl: setup sysctl values for mrsas
350 * input: Adapter instance soft state
351 *
352 * Setup sysctl entries for mrsas driver.
353 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
    struct sysctl_ctx_list *sysctl_ctx = NULL;
    struct sysctl_oid *sysctl_tree = NULL;
    char tmpstr[80], tmpstr2[80];

    /*
     * Setup the sysctl variable so the user can change the debug level
     * on the fly.
     */
    snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
        device_get_unit(sc->mrsas_dev));
    snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

    /* Prefer the context/tree newbus already created for this device. */
    sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
    if (sysctl_ctx != NULL)
        sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

    /* Fall back to a private context rooted at hw.mrsas.<unit>. */
    if (sysctl_tree == NULL) {
        sysctl_ctx_init(&sc->sysctl_ctx);
        sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
            CTLFLAG_RD, 0, tmpstr);
        if (sc->sysctl_tree == NULL)
            return;
        sysctl_ctx = &sc->sysctl_ctx;
        sysctl_tree = sc->sysctl_tree;
    }
    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
        "Disable the use of OCR");

    SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
        strlen(MRSAS_VERSION), "driver version");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "reset_count", CTLFLAG_RD,
        &sc->reset_count, 0, "number of ocr from start of the day");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "fw_outstanding", CTLFLAG_RD,
        &sc->fw_outstanding, 0, "FW outstanding commands");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
        &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
        "Driver debug level");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
        0, "Driver IO timeout value in mili-second.");

    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
        &sc->mrsas_fw_fault_check_delay,
        0, "FW fault check thread delay in seconds. <default is 1 sec>");

    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
        OID_AUTO, "reset_in_progress", CTLFLAG_RD,
        &sc->reset_in_progress, 0, "ocr in progress status");

}
421
422/**
423 * mrsas_get_tunables: get tunable parameters.
424 * input: Adapter instance soft state
425 *
426 * Get tunable parameters. This will help to debug driver at boot time.
427 */
428static void
429mrsas_get_tunables(struct mrsas_softc *sc)
430{
431 char tmpstr[80];
432
433 /* XXX default to some debugging for now */
434 sc->mrsas_debug = MRSAS_FAULT;
435 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
436 sc->mrsas_fw_fault_check_delay = 1;
437 sc->reset_count = 0;
438 sc->reset_in_progress = 0;
439
440 /*
441 * Grab the global variables.
442 */
443 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
444
445 /* Grab the unit-instance variables */
446 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
447 device_get_unit(sc->mrsas_dev));
448 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
449}
450
451/**
452 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
453 * Used to get sequence number at driver load time.
454 * input: Adapter soft state
455 *
456 * Allocates DMAable memory for the event log info internal command.
457 */
458int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
459{
460 int el_info_size;
461
462 /* Allocate get event log info command */
463 el_info_size = sizeof(struct mrsas_evt_log_info);
464 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
465 1, 0, // algnmnt, boundary
466 BUS_SPACE_MAXADDR_32BIT,// lowaddr
467 BUS_SPACE_MAXADDR, // highaddr
468 NULL, NULL, // filter, filterarg
469 el_info_size, // maxsize
470 1, // msegments
471 el_info_size, // maxsegsize
472 BUS_DMA_ALLOCNOW, // flags
473 NULL, NULL, // lockfunc, lockarg
474 &sc->el_info_tag)) {
475 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
476 return (ENOMEM);
477 }
478 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
479 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
480 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
481 return (ENOMEM);
482 }
483 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
484 sc->el_info_mem, el_info_size, mrsas_addr_cb,
485 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
486 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
487 return (ENOMEM);
488 }
489
490 memset(sc->el_info_mem, 0, el_info_size);
491 return (0);
492}
493
494/**
495 * mrsas_free_evt_info_cmd: Free memory for Event log info command
496 * input: Adapter soft state
497 *
498 * Deallocates memory for the event log info internal command.
499 */
void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
    /* Teardown order matters: unload map, free memory, destroy tag. */
    if (sc->el_info_phys_addr)
        bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
    if (sc->el_info_mem != NULL)
        bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
    if (sc->el_info_tag != NULL)
        bus_dma_tag_destroy(sc->el_info_tag);
}
509
510/**
511 * mrsas_get_seq_num: Get latest event sequence number
512 * @sc: Adapter soft state
513 * @eli: Firmware event log sequence number information.
514 * Firmware maintains a log of all events in a non-volatile area.
515 * Driver get the sequence number using DCMD
516 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
517 */
518
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    /* Grab a free MFI command frame for the DCMD. */
    cmd = mrsas_get_mfi_cmd(sc);

    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
        return -ENOMEM;
    }

    dcmd = &cmd->frame->dcmd;

    /* DMA buffer that the firmware will fill with the log info. */
    if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
        mrsas_release_mfi_cmd(cmd);
        return -ENOMEM;
    }

    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame: one read SGE. */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_READ;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
    dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
    dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

    /* Synchronous issue: waits until the firmware completes the DCMD. */
    mrsas_issue_blocked_cmd(sc, cmd);

    /*
     * Copy the data back into callers buffer
     */
    memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
    mrsas_free_evt_log_info_cmd(sc);
    mrsas_release_mfi_cmd(cmd);

    return 0;
}
565
566
567/**
568 * mrsas_register_aen: Register for asynchronous event notification
569 * @sc: Adapter soft state
570 * @seq_num: Starting sequence number
571 * @class_locale: Class of the event
572 * This function subscribes for events beyond the @seq_num
573 * and type @class_locale.
574 *
575 * */
576static int
577mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
578 u_int32_t class_locale_word)
579{
580 int ret_val;
581 struct mrsas_mfi_cmd *cmd;
582 struct mrsas_dcmd_frame *dcmd;
583 union mrsas_evt_class_locale curr_aen;
584 union mrsas_evt_class_locale prev_aen;
585
586/*
587 * If there an AEN pending already (aen_cmd), check if the
588 * class_locale of that pending AEN is inclusive of the new
589 * AEN request we currently have. If it is, then we don't have
590 * to do anything. In other words, whichever events the current
591 * AEN request is subscribing to, have already been subscribed
592 * to.
593 * If the old_cmd is _not_ inclusive, then we have to abort
594 * that command, form a class_locale that is superset of both
595 * old and current and re-issue to the FW
596 * */
597
598 curr_aen.word = class_locale_word;
599
600 if (sc->aen_cmd) {
601
602 prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
603
604/*
605 * A class whose enum value is smaller is inclusive of all
606 * higher values. If a PROGRESS (= -1) was previously
607 * registered, then a new registration requests for higher
608 * classes need not be sent to FW. They are automatically
609 * included.
610 * Locale numbers don't have such hierarchy. They are bitmap values
611 */
612 if ((prev_aen.members.class <= curr_aen.members.class) &&
613 !((prev_aen.members.locale & curr_aen.members.locale) ^
614 curr_aen.members.locale)) {
615 /*
616 * Previously issued event registration includes
617 * current request. Nothing to do.
618 */
619 return 0;
620 } else {
621 curr_aen.members.locale |= prev_aen.members.locale;
622
623 if (prev_aen.members.class < curr_aen.members.class)
624 curr_aen.members.class = prev_aen.members.class;
625
626 sc->aen_cmd->abort_aen = 1;
627 ret_val = mrsas_issue_blocked_abort_cmd(sc,
628 sc->aen_cmd);
629
630 if (ret_val) {
631 printf("mrsas: Failed to abort "
632 "previous AEN command\n");
633 return ret_val;
634 }
635 }
636 }
637
638 cmd = mrsas_get_mfi_cmd(sc);
639
640 if (!cmd)
641 return -ENOMEM;
642
643 dcmd = &cmd->frame->dcmd;
644
645 memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
646
647/*
648 * Prepare DCMD for aen registration
649 */
650 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
651
652 dcmd->cmd = MFI_CMD_DCMD;
653 dcmd->cmd_status = 0x0;
654 dcmd->sge_count = 1;
655 dcmd->flags = MFI_FRAME_DIR_READ;
656 dcmd->timeout = 0;
657 dcmd->pad_0 = 0;
658 dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
659 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
660 dcmd->mbox.w[0] = seq_num;
660 sc->last_seq_num = seq_num;
661 sc->last_seq_num = seq_num;
661 dcmd->mbox.w[1] = curr_aen.word;
662 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
663 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
664
665 if (sc->aen_cmd != NULL) {
666 mrsas_release_mfi_cmd(cmd);
667 return 0;
668 }
669
670 /*
671 * Store reference to the cmd used to register for AEN. When an
672 * application wants us to register for AEN, we have to abort this
673 * cmd and re-register with a new EVENT LOCALE supplied by that app
674 */
675 sc->aen_cmd = cmd;
676
677 /*
678 Issue the aen registration frame
679 */
680 if (mrsas_issue_dcmd(sc, cmd)){
681 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
682 return(1);
683 }
684
685 return 0;
686}
687/**
688 * mrsas_start_aen - Subscribes to AEN during driver load time
689 * @instance: Adapter soft state
690 */
691static int mrsas_start_aen(struct mrsas_softc *sc)
692{
693 struct mrsas_evt_log_info eli;
694 union mrsas_evt_class_locale class_locale;
695
696
697 /* Get the latest sequence number from FW*/
698
699 memset(&eli, 0, sizeof(eli));
700
701 if (mrsas_get_seq_num(sc, &eli))
702 return -1;
703
704 /* Register AEN with FW for latest sequence number plus 1*/
705 class_locale.members.reserved = 0;
706 class_locale.members.locale = MR_EVT_LOCALE_ALL;
707 class_locale.members.class = MR_EVT_CLASS_DEBUG;
708
709 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
710 class_locale.word);
711}
712
713/**
714 * mrsas_attach: PCI entry point
715 * input: device struct pointer
716 *
717 * Performs setup of PCI and registers, initializes mutexes and
718 * linked lists, registers interrupts and CAM, and initializes
719 * the adapter/controller to its proper state.
720 */
721static int mrsas_attach(device_t dev)
722{
723 struct mrsas_softc *sc = device_get_softc(dev);
724 uint32_t cmd, bar, error;
725
726 /* Look up our softc and initialize its fields. */
727 sc->mrsas_dev = dev;
728 sc->device_id = pci_get_device(dev);
729
730 mrsas_get_tunables(sc);
731
732 /*
733 * Set up PCI and registers
734 */
735 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
736 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
737 return (ENXIO);
738 }
739 /* Force the busmaster enable bit on. */
740 cmd |= PCIM_CMD_BUSMASTEREN;
741 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
742
743 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
744 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
745
746 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
747 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
748 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
749 == NULL) {
750 device_printf(dev, "Cannot allocate PCI registers\n");
751 goto attach_fail;
752 }
753 sc->bus_tag = rman_get_bustag(sc->reg_res);
754 sc->bus_handle = rman_get_bushandle(sc->reg_res);
755
756 /* Intialize mutexes */
757 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
758 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
759 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
760 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
761 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
762 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
763 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
764 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
765
766 /* Intialize linked list */
767 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
768 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
769
770 atomic_set(&sc->fw_outstanding,0);
771
772 sc->io_cmds_highwater = 0;
773
774 /* Create a /dev entry for this device. */
775 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
776 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
777 device_get_unit(dev));
662 dcmd->mbox.w[1] = curr_aen.word;
663 dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
664 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
665
666 if (sc->aen_cmd != NULL) {
667 mrsas_release_mfi_cmd(cmd);
668 return 0;
669 }
670
671 /*
672 * Store reference to the cmd used to register for AEN. When an
673 * application wants us to register for AEN, we have to abort this
674 * cmd and re-register with a new EVENT LOCALE supplied by that app
675 */
676 sc->aen_cmd = cmd;
677
678 /*
679 Issue the aen registration frame
680 */
681 if (mrsas_issue_dcmd(sc, cmd)){
682 device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
683 return(1);
684 }
685
686 return 0;
687}
688/**
689 * mrsas_start_aen - Subscribes to AEN during driver load time
690 * @instance: Adapter soft state
691 */
/* NOTE(review): duplicate copy from the merged diff rendering — identical
 * to the earlier mrsas_start_aen; one of the two should be dropped when
 * the file is reassembled. */
static int mrsas_start_aen(struct mrsas_softc *sc)
{
    struct mrsas_evt_log_info eli;
    union mrsas_evt_class_locale class_locale;


    /* Get the latest sequence number from FW*/

    memset(&eli, 0, sizeof(eli));

    if (mrsas_get_seq_num(sc, &eli))
        return -1;

    /* Register AEN with FW for latest sequence number plus 1*/
    class_locale.members.reserved = 0;
    class_locale.members.locale = MR_EVT_LOCALE_ALL;
    class_locale.members.class = MR_EVT_CLASS_DEBUG;

    return mrsas_register_aen(sc, eli.newest_seq_num + 1,
        class_locale.word);
}
713
714/**
715 * mrsas_attach: PCI entry point
716 * input: device struct pointer
717 *
718 * Performs setup of PCI and registers, initializes mutexes and
719 * linked lists, registers interrupts and CAM, and initializes
720 * the adapter/controller to its proper state.
721 */
722static int mrsas_attach(device_t dev)
723{
724 struct mrsas_softc *sc = device_get_softc(dev);
725 uint32_t cmd, bar, error;
726
727 /* Look up our softc and initialize its fields. */
728 sc->mrsas_dev = dev;
729 sc->device_id = pci_get_device(dev);
730
731 mrsas_get_tunables(sc);
732
733 /*
734 * Set up PCI and registers
735 */
736 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
737 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
738 return (ENXIO);
739 }
740 /* Force the busmaster enable bit on. */
741 cmd |= PCIM_CMD_BUSMASTEREN;
742 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
743
744 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
745 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
746
747 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
748 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
749 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
750 == NULL) {
751 device_printf(dev, "Cannot allocate PCI registers\n");
752 goto attach_fail;
753 }
754 sc->bus_tag = rman_get_bustag(sc->reg_res);
755 sc->bus_handle = rman_get_bushandle(sc->reg_res);
756
757 /* Intialize mutexes */
758 mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
759 mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
760 mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
761 mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
762 mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
763 mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
764 mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
765 mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
766
767 /* Intialize linked list */
768 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
769 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
770
771 atomic_set(&sc->fw_outstanding,0);
772
773 sc->io_cmds_highwater = 0;
774
775 /* Create a /dev entry for this device. */
776 sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
777 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
778 device_get_unit(dev));
779 if (device_get_unit(dev) == 0)
780 make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
778 if (sc->mrsas_cdev)
779 sc->mrsas_cdev->si_drv1 = sc;
780
781 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
782 sc->UnevenSpanSupport = 0;
783
784 /* Initialize Firmware */
785 if (mrsas_init_fw(sc) != SUCCESS) {
786 goto attach_fail_fw;
787 }
788
789 /* Register SCSI mid-layer */
790 if ((mrsas_cam_attach(sc) != SUCCESS)) {
791 goto attach_fail_cam;
792 }
793
794 /* Register IRQs */
795 if (mrsas_setup_irq(sc) != SUCCESS) {
796 goto attach_fail_irq;
797 }
798
799 /* Enable Interrupts */
800 mrsas_enable_intr(sc);
801
802 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
803 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
804 device_get_unit(sc->mrsas_dev));
805 if (error) {
806 printf("Error %d starting rescan thread\n", error);
807 goto attach_fail_irq;
808 }
809
810 mrsas_setup_sysctl(sc);
811
812 /* Initiate AEN (Asynchronous Event Notification)*/
813
814 if (mrsas_start_aen(sc)) {
815 printf("Error: start aen failed\n");
816 goto fail_start_aen;
817 }
818
781 if (sc->mrsas_cdev)
782 sc->mrsas_cdev->si_drv1 = sc;
783
784 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
785 sc->UnevenSpanSupport = 0;
786
787 /* Initialize Firmware */
788 if (mrsas_init_fw(sc) != SUCCESS) {
789 goto attach_fail_fw;
790 }
791
792 /* Register SCSI mid-layer */
793 if ((mrsas_cam_attach(sc) != SUCCESS)) {
794 goto attach_fail_cam;
795 }
796
797 /* Register IRQs */
798 if (mrsas_setup_irq(sc) != SUCCESS) {
799 goto attach_fail_irq;
800 }
801
802 /* Enable Interrupts */
803 mrsas_enable_intr(sc);
804
805 error = mrsas_kproc_create(mrsas_ocr_thread, sc,
806 &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
807 device_get_unit(sc->mrsas_dev));
808 if (error) {
809 printf("Error %d starting rescan thread\n", error);
810 goto attach_fail_irq;
811 }
812
813 mrsas_setup_sysctl(sc);
814
815 /* Initiate AEN (Asynchronous Event Notification)*/
816
817 if (mrsas_start_aen(sc)) {
818 printf("Error: start aen failed\n");
819 goto fail_start_aen;
820 }
821
822 /*
823 * Add this controller to mrsas_mgmt_info structure so that it
824 * can be exported to management applications
825 */
826 if (device_get_unit(dev) == 0)
827 memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
828
829 mrsas_mgmt_info.count++;
830 mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
831 mrsas_mgmt_info.max_index++;
832
819 return (0);
820
821fail_start_aen:
822attach_fail_irq:
823 mrsas_teardown_intr(sc);
824attach_fail_cam:
825 mrsas_cam_detach(sc);
826attach_fail_fw:
827//attach_fail_raidmap:
828 mrsas_free_mem(sc);
829 mtx_destroy(&sc->sim_lock);
830 mtx_destroy(&sc->aen_lock);
831 mtx_destroy(&sc->pci_lock);
832 mtx_destroy(&sc->io_lock);
833 mtx_destroy(&sc->ioctl_lock);
834 mtx_destroy(&sc->mpt_cmd_pool_lock);
835 mtx_destroy(&sc->mfi_cmd_pool_lock);
836 mtx_destroy(&sc->raidmap_lock);
837attach_fail:
838 destroy_dev(sc->mrsas_cdev);
839 if (sc->reg_res){
840 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
841 sc->reg_res_id, sc->reg_res);
842 }
843 return (ENXIO);
844}
845
846/**
847 * mrsas_detach: De-allocates and teardown resources
848 * input: device struct pointer
849 *
850 * This function is the entry point for device disconnect and detach. It
851 * performs memory de-allocations, shutdown of the controller and various
852 * teardown and destroy resource functions.
853 */
854static int mrsas_detach(device_t dev)
855{
856 struct mrsas_softc *sc;
857 int i = 0;
858
859 sc = device_get_softc(dev);
860 sc->remove_in_progress = 1;
833 return (0);
834
835fail_start_aen:
836attach_fail_irq:
837 mrsas_teardown_intr(sc);
838attach_fail_cam:
839 mrsas_cam_detach(sc);
840attach_fail_fw:
841//attach_fail_raidmap:
842 mrsas_free_mem(sc);
843 mtx_destroy(&sc->sim_lock);
844 mtx_destroy(&sc->aen_lock);
845 mtx_destroy(&sc->pci_lock);
846 mtx_destroy(&sc->io_lock);
847 mtx_destroy(&sc->ioctl_lock);
848 mtx_destroy(&sc->mpt_cmd_pool_lock);
849 mtx_destroy(&sc->mfi_cmd_pool_lock);
850 mtx_destroy(&sc->raidmap_lock);
851attach_fail:
852 destroy_dev(sc->mrsas_cdev);
853 if (sc->reg_res){
854 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
855 sc->reg_res_id, sc->reg_res);
856 }
857 return (ENXIO);
858}
859
860/**
861 * mrsas_detach: De-allocates and teardown resources
862 * input: device struct pointer
863 *
864 * This function is the entry point for device disconnect and detach. It
865 * performs memory de-allocations, shutdown of the controller and various
866 * teardown and destroy resource functions.
867 */
868static int mrsas_detach(device_t dev)
869{
870 struct mrsas_softc *sc;
871 int i = 0;
872
873 sc = device_get_softc(dev);
874 sc->remove_in_progress = 1;
875
876 /*
877 * Take the instance off the instance array. Note that we will not
878 * decrement the max_index. We let this array be sparse array
879 */
880 for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
881 if (mrsas_mgmt_info.sc_ptr[i] == sc) {
882 mrsas_mgmt_info.count--;
883 mrsas_mgmt_info.sc_ptr[i] = NULL;
884 break;
885 }
886 }
887
861 if(sc->ocr_thread_active)
862 wakeup(&sc->ocr_chan);
863 while(sc->reset_in_progress){
864 i++;
865 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
866 mrsas_dprint(sc, MRSAS_INFO,
867 "[%2d]waiting for ocr to be finished\n",i);
868 }
869 pause("mr_shutdown", hz);
870 }
871 i = 0;
872 while(sc->ocr_thread_active){
873 i++;
874 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
875 mrsas_dprint(sc, MRSAS_INFO,
876 "[%2d]waiting for "
877 "mrsas_ocr thread to quit ocr %d\n",i,
878 sc->ocr_thread_active);
879 }
880 pause("mr_shutdown", hz);
881 }
882 mrsas_flush_cache(sc);
883 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
884 mrsas_disable_intr(sc);
885 mrsas_cam_detach(sc);
886 mrsas_teardown_intr(sc);
887 mrsas_free_mem(sc);
888 mtx_destroy(&sc->sim_lock);
889 mtx_destroy(&sc->aen_lock);
890 mtx_destroy(&sc->pci_lock);
891 mtx_destroy(&sc->io_lock);
892 mtx_destroy(&sc->ioctl_lock);
893 mtx_destroy(&sc->mpt_cmd_pool_lock);
894 mtx_destroy(&sc->mfi_cmd_pool_lock);
895 mtx_destroy(&sc->raidmap_lock);
896 if (sc->reg_res){
897 bus_release_resource(sc->mrsas_dev,
898 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
899 }
900 destroy_dev(sc->mrsas_cdev);
901 if (sc->sysctl_tree != NULL)
902 sysctl_ctx_free(&sc->sysctl_ctx);
903 return (0);
904}
905
906/**
907 * mrsas_free_mem: Frees allocated memory
908 * input: Adapter instance soft state
909 *
910 * This function is called from mrsas_detach() to free previously allocated
911 * memory.
912 */
void mrsas_free_mem(struct mrsas_softc *sc)
{
    int i;
    u_int32_t max_cmd;
    struct mrsas_mfi_cmd *mfi_cmd;
    struct mrsas_mpt_cmd *mpt_cmd;

    /*
     * Free RAID map memory (two buffers, ping-pong indices 0/1).
     * For each DMA resource the order is: unload map, free memory,
     * destroy tag.
     */
    for (i=0; i < 2; i++)
    {
        if (sc->raidmap_phys_addr[i])
            bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_mem[i] != NULL)
            bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
        if (sc->raidmap_tag[i] != NULL)
            bus_dma_tag_destroy(sc->raidmap_tag[i]);

        if (sc->ld_drv_map[i] != NULL)
            free(sc->ld_drv_map[i], M_MRSAS);
    }

    /*
     * Free version buffer memory
     */
    if (sc->verbuf_phys_addr)
        bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
    if (sc->verbuf_mem != NULL)
        bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
    if (sc->verbuf_tag != NULL)
        bus_dma_tag_destroy(sc->verbuf_tag);


    /*
     * Free sense buffer memory
     */
    if (sc->sense_phys_addr)
        bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
    if (sc->sense_mem != NULL)
        bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
    if (sc->sense_tag != NULL)
        bus_dma_tag_destroy(sc->sense_tag);

    /*
     * Free chain frame memory
     */
    if (sc->chain_frame_phys_addr)
        bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
    if (sc->chain_frame_mem != NULL)
        bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
    if (sc->chain_frame_tag != NULL)
        bus_dma_tag_destroy(sc->chain_frame_tag);

    /*
     * Free IO Request memory
     */
    if (sc->io_request_phys_addr)
        bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
    if (sc->io_request_mem != NULL)
        bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
    if (sc->io_request_tag != NULL)
        bus_dma_tag_destroy(sc->io_request_tag);

    /*
     * Free Reply Descriptor memory
     */
    if (sc->reply_desc_phys_addr)
        bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
    if (sc->reply_desc_mem != NULL)
        bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
    if (sc->reply_desc_tag != NULL)
        bus_dma_tag_destroy(sc->reply_desc_tag);

    /*
     * Free event detail memory
     */
    if (sc->evt_detail_phys_addr)
        bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
    if (sc->evt_detail_mem != NULL)
        bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
    if (sc->evt_detail_tag != NULL)
        bus_dma_tag_destroy(sc->evt_detail_tag);

    /*
     * Free MFI frames (the frame DMA memory first; the command
     * structures themselves are freed further below).
     */
    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            mfi_cmd = sc->mfi_cmd_list[i];
            mrsas_free_frame(sc, mfi_cmd);
        }
    }
    if (sc->mficmd_frame_tag != NULL)
        bus_dma_tag_destroy(sc->mficmd_frame_tag);

    /*
     * Free MPT internal command list
     */
    max_cmd = sc->max_fw_cmds;
    if (sc->mpt_cmd_list) {
        for (i = 0; i < max_cmd; i++) {
            mpt_cmd = sc->mpt_cmd_list[i];
            bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
            free(sc->mpt_cmd_list[i], M_MRSAS);
        }
        free(sc->mpt_cmd_list, M_MRSAS);
        sc->mpt_cmd_list = NULL;
    }

    /*
     * Free MFI internal command list
     */

    if (sc->mfi_cmd_list) {
        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
            free(sc->mfi_cmd_list[i], M_MRSAS);
        }
        free(sc->mfi_cmd_list, M_MRSAS);
        sc->mfi_cmd_list = NULL;
    }

    /*
     * Free request descriptor memory
     */
    free(sc->req_desc, M_MRSAS);
    sc->req_desc = NULL;

    /*
     * Destroy parent tag last, after every child tag is gone.
     */
    if (sc->mrsas_parent_tag != NULL)
        bus_dma_tag_destroy(sc->mrsas_parent_tag);
}
1047
1048/**
1049 * mrsas_teardown_intr: Teardown interrupt
1050 * input: Adapter instance soft state
1051 *
1052 * This function is called from mrsas_detach() to teardown and release
1053 * bus interrupt resourse.
1054 */
1055void mrsas_teardown_intr(struct mrsas_softc *sc)
1056{
1057 if (sc->intr_handle)
1058 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1059 if (sc->mrsas_irq != NULL)
1060 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1061 sc->intr_handle = NULL;
1062}
1063
1064/**
1065 * mrsas_suspend: Suspend entry point
1066 * input: Device struct pointer
1067 *
1068 * This function is the entry point for system suspend from the OS.
1069 */
1070static int mrsas_suspend(device_t dev)
1071{
1072 struct mrsas_softc *sc;
1073
1074 sc = device_get_softc(dev);
1075 return (0);
1076}
1077
1078/**
1079 * mrsas_resume: Resume entry point
1080 * input: Device struct pointer
1081 *
1082 * This function is the entry point for system resume from the OS.
1083 */
1084static int mrsas_resume(device_t dev)
1085{
1086 struct mrsas_softc *sc;
1087
1088 sc = device_get_softc(dev);
1089 return (0);
1090}
1091
1092/**
1093 * mrsas_ioctl: IOCtl commands entry point.
1094 *
1095 * This function is the entry point for IOCtls from the OS. It calls the
1096 * appropriate function for processing depending on the command received.
1097 */
1098static int
1099mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1100{
1101 struct mrsas_softc *sc;
1102 int ret = 0, i = 0;
1103
888 if(sc->ocr_thread_active)
889 wakeup(&sc->ocr_chan);
890 while(sc->reset_in_progress){
891 i++;
892 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
893 mrsas_dprint(sc, MRSAS_INFO,
894 "[%2d]waiting for ocr to be finished\n",i);
895 }
896 pause("mr_shutdown", hz);
897 }
898 i = 0;
899 while(sc->ocr_thread_active){
900 i++;
901 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
902 mrsas_dprint(sc, MRSAS_INFO,
903 "[%2d]waiting for "
904 "mrsas_ocr thread to quit ocr %d\n",i,
905 sc->ocr_thread_active);
906 }
907 pause("mr_shutdown", hz);
908 }
909 mrsas_flush_cache(sc);
910 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
911 mrsas_disable_intr(sc);
912 mrsas_cam_detach(sc);
913 mrsas_teardown_intr(sc);
914 mrsas_free_mem(sc);
915 mtx_destroy(&sc->sim_lock);
916 mtx_destroy(&sc->aen_lock);
917 mtx_destroy(&sc->pci_lock);
918 mtx_destroy(&sc->io_lock);
919 mtx_destroy(&sc->ioctl_lock);
920 mtx_destroy(&sc->mpt_cmd_pool_lock);
921 mtx_destroy(&sc->mfi_cmd_pool_lock);
922 mtx_destroy(&sc->raidmap_lock);
923 if (sc->reg_res){
924 bus_release_resource(sc->mrsas_dev,
925 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
926 }
927 destroy_dev(sc->mrsas_cdev);
928 if (sc->sysctl_tree != NULL)
929 sysctl_ctx_free(&sc->sysctl_ctx);
930 return (0);
931}
932
933/**
934 * mrsas_free_mem: Frees allocated memory
935 * input: Adapter instance soft state
936 *
937 * This function is called from mrsas_detach() to free previously allocated
938 * memory.
939 */
940void mrsas_free_mem(struct mrsas_softc *sc)
941{
942 int i;
943 u_int32_t max_cmd;
944 struct mrsas_mfi_cmd *mfi_cmd;
945 struct mrsas_mpt_cmd *mpt_cmd;
946
947 /*
948 * Free RAID map memory
949 */
950 for (i=0; i < 2; i++)
951 {
952 if (sc->raidmap_phys_addr[i])
953 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
954 if (sc->raidmap_mem[i] != NULL)
955 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
956 if (sc->raidmap_tag[i] != NULL)
957 bus_dma_tag_destroy(sc->raidmap_tag[i]);
958
959 if (sc->ld_drv_map[i] != NULL)
960 free(sc->ld_drv_map[i], M_MRSAS);
961 }
962
963 /*
964 * Free version buffer memroy
965 */
966 if (sc->verbuf_phys_addr)
967 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
968 if (sc->verbuf_mem != NULL)
969 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
970 if (sc->verbuf_tag != NULL)
971 bus_dma_tag_destroy(sc->verbuf_tag);
972
973
974 /*
975 * Free sense buffer memory
976 */
977 if (sc->sense_phys_addr)
978 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
979 if (sc->sense_mem != NULL)
980 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
981 if (sc->sense_tag != NULL)
982 bus_dma_tag_destroy(sc->sense_tag);
983
984 /*
985 * Free chain frame memory
986 */
987 if (sc->chain_frame_phys_addr)
988 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
989 if (sc->chain_frame_mem != NULL)
990 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
991 if (sc->chain_frame_tag != NULL)
992 bus_dma_tag_destroy(sc->chain_frame_tag);
993
994 /*
995 * Free IO Request memory
996 */
997 if (sc->io_request_phys_addr)
998 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
999 if (sc->io_request_mem != NULL)
1000 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1001 if (sc->io_request_tag != NULL)
1002 bus_dma_tag_destroy(sc->io_request_tag);
1003
1004 /*
1005 * Free Reply Descriptor memory
1006 */
1007 if (sc->reply_desc_phys_addr)
1008 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1009 if (sc->reply_desc_mem != NULL)
1010 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1011 if (sc->reply_desc_tag != NULL)
1012 bus_dma_tag_destroy(sc->reply_desc_tag);
1013
1014 /*
1015 * Free event detail memory
1016 */
1017 if (sc->evt_detail_phys_addr)
1018 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1019 if (sc->evt_detail_mem != NULL)
1020 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1021 if (sc->evt_detail_tag != NULL)
1022 bus_dma_tag_destroy(sc->evt_detail_tag);
1023
1024 /*
1025 * Free MFI frames
1026 */
1027 if (sc->mfi_cmd_list) {
1028 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1029 mfi_cmd = sc->mfi_cmd_list[i];
1030 mrsas_free_frame(sc, mfi_cmd);
1031 }
1032 }
1033 if (sc->mficmd_frame_tag != NULL)
1034 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1035
1036 /*
1037 * Free MPT internal command list
1038 */
1039 max_cmd = sc->max_fw_cmds;
1040 if (sc->mpt_cmd_list) {
1041 for (i = 0; i < max_cmd; i++) {
1042 mpt_cmd = sc->mpt_cmd_list[i];
1043 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1044 free(sc->mpt_cmd_list[i], M_MRSAS);
1045 }
1046 free(sc->mpt_cmd_list, M_MRSAS);
1047 sc->mpt_cmd_list = NULL;
1048 }
1049
1050 /*
1051 * Free MFI internal command list
1052 */
1053
1054 if (sc->mfi_cmd_list) {
1055 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1056 free(sc->mfi_cmd_list[i], M_MRSAS);
1057 }
1058 free(sc->mfi_cmd_list, M_MRSAS);
1059 sc->mfi_cmd_list = NULL;
1060 }
1061
1062 /*
1063 * Free request descriptor memory
1064 */
1065 free(sc->req_desc, M_MRSAS);
1066 sc->req_desc = NULL;
1067
1068 /*
1069 * Destroy parent tag
1070 */
1071 if (sc->mrsas_parent_tag != NULL)
1072 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1073}
1074
1075/**
1076 * mrsas_teardown_intr: Teardown interrupt
1077 * input: Adapter instance soft state
1078 *
1079 * This function is called from mrsas_detach() to teardown and release
1080 * bus interrupt resourse.
1081 */
1082void mrsas_teardown_intr(struct mrsas_softc *sc)
1083{
1084 if (sc->intr_handle)
1085 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1086 if (sc->mrsas_irq != NULL)
1087 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1088 sc->intr_handle = NULL;
1089}
1090
1091/**
1092 * mrsas_suspend: Suspend entry point
1093 * input: Device struct pointer
1094 *
1095 * This function is the entry point for system suspend from the OS.
1096 */
1097static int mrsas_suspend(device_t dev)
1098{
1099 struct mrsas_softc *sc;
1100
1101 sc = device_get_softc(dev);
1102 return (0);
1103}
1104
1105/**
1106 * mrsas_resume: Resume entry point
1107 * input: Device struct pointer
1108 *
1109 * This function is the entry point for system resume from the OS.
1110 */
1111static int mrsas_resume(device_t dev)
1112{
1113 struct mrsas_softc *sc;
1114
1115 sc = device_get_softc(dev);
1116 return (0);
1117}
1118
1119/**
1120 * mrsas_ioctl: IOCtl commands entry point.
1121 *
1122 * This function is the entry point for IOCtls from the OS. It calls the
1123 * appropriate function for processing depending on the command received.
1124 */
1125static int
1126mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1127{
1128 struct mrsas_softc *sc;
1129 int ret = 0, i = 0;
1130
1104 sc = (struct mrsas_softc *)(dev->si_drv1);
1105
1131 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1132
1133 /* get the Host number & the softc from data sent by the Application */
1134 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1135
1136 if ((mrsas_mgmt_info.max_index == user_ioc->host_no) || (sc == NULL)) {
1137 printf ("Please check the controller number\n");
1138 if (sc == NULL)
1139 printf ("There is NO such Host no. %d\n", user_ioc->host_no);
1140
1141 return ENOENT;
1142 }
1143
1106 if (sc->remove_in_progress) {
1107 mrsas_dprint(sc, MRSAS_INFO,
1108 "Driver remove or shutdown called.\n");
1109 return ENOENT;
1110 }
1111
1112 mtx_lock_spin(&sc->ioctl_lock);
1113 if (!sc->reset_in_progress) {
1114 mtx_unlock_spin(&sc->ioctl_lock);
1115 goto do_ioctl;
1116 }
1117
1118 /* Release ioclt_lock, and wait for OCR
1119 * to be finished */
1120 mtx_unlock_spin(&sc->ioctl_lock);
1121 while(sc->reset_in_progress){
1122 i++;
1123 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1124 mrsas_dprint(sc, MRSAS_INFO,
1125 "[%2d]waiting for "
1126 "OCR to be finished %d\n",i,
1127 sc->ocr_thread_active);
1128 }
1129 pause("mr_ioctl", hz);
1130 }
1131
1132do_ioctl:
1133 switch (cmd) {
1144 if (sc->remove_in_progress) {
1145 mrsas_dprint(sc, MRSAS_INFO,
1146 "Driver remove or shutdown called.\n");
1147 return ENOENT;
1148 }
1149
1150 mtx_lock_spin(&sc->ioctl_lock);
1151 if (!sc->reset_in_progress) {
1152 mtx_unlock_spin(&sc->ioctl_lock);
1153 goto do_ioctl;
1154 }
1155
1156 /* Release ioclt_lock, and wait for OCR
1157 * to be finished */
1158 mtx_unlock_spin(&sc->ioctl_lock);
1159 while(sc->reset_in_progress){
1160 i++;
1161 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1162 mrsas_dprint(sc, MRSAS_INFO,
1163 "[%2d]waiting for "
1164 "OCR to be finished %d\n",i,
1165 sc->ocr_thread_active);
1166 }
1167 pause("mr_ioctl", hz);
1168 }
1169
1170do_ioctl:
1171 switch (cmd) {
1134 case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1135 ret = mrsas_passthru(sc, (void *)arg);
1172 case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
1173#ifdef COMPAT_FREEBSD32
1174 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
1175#endif
1176 ret = mrsas_passthru(sc, (void *)arg, cmd);
1136 break;
1137 case MRSAS_IOC_SCAN_BUS:
1138 ret = mrsas_bus_scan(sc);
1139 break;
1177 break;
1178 case MRSAS_IOC_SCAN_BUS:
1179 ret = mrsas_bus_scan(sc);
1180 break;
1181 default:
1182 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
1140 }
1141
1142 return (ret);
1143}
1144
1145/**
1146 * mrsas_setup_irq: Set up interrupt.
1147 * input: Adapter instance soft state
1148 *
1149 * This function sets up interrupts as a bus resource, with flags indicating
1150 * resource permitting contemporaneous sharing and for resource to activate
1151 * atomically.
1152 */
1153static int mrsas_setup_irq(struct mrsas_softc *sc)
1154{
1155 sc->irq_id = 0;
1156 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1157 &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
1158 if (sc->mrsas_irq == NULL){
1159 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
1160 return (FAIL);
1161 }
1162 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
1163 NULL, mrsas_isr, sc, &sc->intr_handle)) {
1164 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1165 return (FAIL);
1166 }
1167
1168 return (0);
1169}
1170
1171/*
1172 * mrsas_isr: ISR entry point
1173 * input: argument pointer
1174 *
1175 * This function is the interrupt service routine entry point. There
1176 * are two types of interrupts, state change interrupt and response
1177 * interrupt. If an interrupt is not ours, we just return.
1178 */
1179void mrsas_isr(void *arg)
1180{
1181 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1182 int status;
1183
1184 /* Clear FW state change interrupt */
1185 status = mrsas_clear_intr(sc);
1186
1187 /* Not our interrupt */
1188 if (!status)
1189 return;
1190
1191 /* If we are resetting, bail */
1192 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1193 printf(" Entered into ISR when OCR is going active. \n");
1194 mrsas_clear_intr(sc);
1195 return;
1196 }
1197 /* Process for reply request and clear response interrupt */
1198 if (mrsas_complete_cmd(sc) != SUCCESS)
1199 mrsas_clear_intr(sc);
1200
1201 return;
1202}
1203
1204/*
1205 * mrsas_complete_cmd: Process reply request
1206 * input: Adapter instance soft state
1207 *
1208 * This function is called from mrsas_isr() to process reply request and
1209 * clear response interrupt. Processing of the reply request entails
1210 * walking through the reply descriptor array for the command request
1211 * pended from Firmware. We look at the Function field to determine
1212 * the command type and perform the appropriate action. Before we
1213 * return, we clear the response interrupt.
1214 */
1215static int mrsas_complete_cmd(struct mrsas_softc *sc)
1216{
1217 Mpi2ReplyDescriptorsUnion_t *desc;
1218 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1219 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1220 struct mrsas_mpt_cmd *cmd_mpt;
1221 struct mrsas_mfi_cmd *cmd_mfi;
1222 u_int8_t arm, reply_descript_type;
1223 u_int16_t smid, num_completed;
1224 u_int8_t status, extStatus;
1225 union desc_value desc_val;
1226 PLD_LOAD_BALANCE_INFO lbinfo;
1227 u_int32_t device_id;
1228 int threshold_reply_count = 0;
1229
1230
1231 /* If we have a hardware error, not need to continue */
1232 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1233 return (DONE);
1234
1235 desc = sc->reply_desc_mem;
1236 desc += sc->last_reply_idx;
1237
1238 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1239
1240 desc_val.word = desc->Words;
1241 num_completed = 0;
1242
1243 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1244
1245 /* Find our reply descriptor for the command and process */
1246 while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
1247 {
1248 smid = reply_desc->SMID;
1249 cmd_mpt = sc->mpt_cmd_list[smid -1];
1250 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1251
1252 status = scsi_io_req->RaidContext.status;
1253 extStatus = scsi_io_req->RaidContext.exStatus;
1254
1255 switch (scsi_io_req->Function)
1256 {
1257 case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
1258 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1259 lbinfo = &sc->load_balance_info[device_id];
1260 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1261 arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1262 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1263 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1264 }
1265 //Fall thru and complete IO
1266 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1267 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1268 mrsas_cmd_done(sc, cmd_mpt);
1269 scsi_io_req->RaidContext.status = 0;
1270 scsi_io_req->RaidContext.exStatus = 0;
1271 atomic_dec(&sc->fw_outstanding);
1272 break;
1273 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1274 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1275 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1276 cmd_mpt->flags = 0;
1277 mrsas_release_mpt_cmd(cmd_mpt);
1278 break;
1279 }
1280
1281 sc->last_reply_idx++;
1282 if (sc->last_reply_idx >= sc->reply_q_depth)
1283 sc->last_reply_idx = 0;
1284
1285 desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1286 num_completed++;
1287 threshold_reply_count++;
1288
1289 /* Get the next reply descriptor */
1290 if (!sc->last_reply_idx)
1291 desc = sc->reply_desc_mem;
1292 else
1293 desc++;
1294
1295 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1296 desc_val.word = desc->Words;
1297
1298 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1299
1300 if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1301 break;
1302
1303 /*
1304 * Write to reply post index after completing threshold reply count
1305 * and still there are more replies in reply queue pending to be
1306 * completed.
1307 */
1308 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1309 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1310 sc->last_reply_idx);
1311 threshold_reply_count = 0;
1312 }
1313 }
1314
1315 /* No match, just return */
1316 if (num_completed == 0)
1317 return (DONE);
1318
1319 /* Clear response interrupt */
1320 mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1321
1322 return(0);
1323}
1324
1325/*
1326 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1327 * input: Adapter instance soft state
1328 *
1329 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1330 * It checks the command status and maps the appropriate CAM status for the CCB.
1331 */
1332void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1333{
1334 struct mrsas_softc *sc = cmd->sc;
1335 u_int8_t *sense_data;
1336
1337 switch (status) {
1338 case MFI_STAT_OK:
1339 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1340 break;
1341 case MFI_STAT_SCSI_IO_FAILED:
1342 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1343 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1344 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1345 if (sense_data) {
1346 /* For now just copy 18 bytes back */
1347 memcpy(sense_data, cmd->sense, 18);
1348 cmd->ccb_ptr->csio.sense_len = 18;
1349 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1350 }
1351 break;
1352 case MFI_STAT_LD_OFFLINE:
1353 case MFI_STAT_DEVICE_NOT_FOUND:
1354 if (cmd->ccb_ptr->ccb_h.target_lun)
1355 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1356 else
1357 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1358 break;
1359 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1360 /*send status to CAM layer to retry sending command without
1361 * decrementing retry counter*/
1362 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1363 break;
1364 default:
1365 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1366 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1367 cmd->ccb_ptr->csio.scsi_status = status;
1368 }
1369 return;
1370}
1371
1372/*
1373 * mrsas_alloc_mem: Allocate DMAable memory.
1374 * input: Adapter instance soft state
1375 *
1376 * This function creates the parent DMA tag and allocates DMAable memory.
1377 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
1378 * into Kernel virtual address. Callback argument is physical memory address.
1379 */
1380static int mrsas_alloc_mem(struct mrsas_softc *sc)
1381{
1382 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1383 chain_frame_size, evt_detail_size;
1384
1385 /*
1386 * Allocate parent DMA tag
1387 */
1388 if (bus_dma_tag_create(NULL, /* parent */
1389 1, /* alignment */
1390 0, /* boundary */
1391 BUS_SPACE_MAXADDR, /* lowaddr */
1392 BUS_SPACE_MAXADDR, /* highaddr */
1393 NULL, NULL, /* filter, filterarg */
1394 MRSAS_MAX_IO_SIZE,/* maxsize */
1395 MRSAS_MAX_SGL, /* nsegments */
1396 MRSAS_MAX_IO_SIZE,/* maxsegsize */
1397 0, /* flags */
1398 NULL, NULL, /* lockfunc, lockarg */
1399 &sc->mrsas_parent_tag /* tag */
1400 )) {
1401 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1402 return(ENOMEM);
1403 }
1404
1405 /*
1406 * Allocate for version buffer
1407 */
1408 verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1409 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1410 1, 0, // algnmnt, boundary
1411 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1412 BUS_SPACE_MAXADDR, // highaddr
1413 NULL, NULL, // filter, filterarg
1414 verbuf_size, // maxsize
1415 1, // msegments
1416 verbuf_size, // maxsegsize
1417 BUS_DMA_ALLOCNOW, // flags
1418 NULL, NULL, // lockfunc, lockarg
1419 &sc->verbuf_tag)) {
1420 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1421 return (ENOMEM);
1422 }
1423 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1424 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1425 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1426 return (ENOMEM);
1427 }
1428 bzero(sc->verbuf_mem, verbuf_size);
1429 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1430 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1431 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1432 return(ENOMEM);
1433 }
1434
1435 /*
1436 * Allocate IO Request Frames
1437 */
1438 io_req_size = sc->io_frames_alloc_sz;
1439 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1440 16, 0, // algnmnt, boundary
1441 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1442 BUS_SPACE_MAXADDR, // highaddr
1443 NULL, NULL, // filter, filterarg
1444 io_req_size, // maxsize
1445 1, // msegments
1446 io_req_size, // maxsegsize
1447 BUS_DMA_ALLOCNOW, // flags
1448 NULL, NULL, // lockfunc, lockarg
1449 &sc->io_request_tag)) {
1450 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1451 return (ENOMEM);
1452 }
1453 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1454 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1455 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1456 return (ENOMEM);
1457 }
1458 bzero(sc->io_request_mem, io_req_size);
1459 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1460 sc->io_request_mem, io_req_size, mrsas_addr_cb,
1461 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1462 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1463 return (ENOMEM);
1464 }
1465
1466 /*
1467 * Allocate Chain Frames
1468 */
1469 chain_frame_size = sc->chain_frames_alloc_sz;
1470 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1471 4, 0, // algnmnt, boundary
1472 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1473 BUS_SPACE_MAXADDR, // highaddr
1474 NULL, NULL, // filter, filterarg
1475 chain_frame_size, // maxsize
1476 1, // msegments
1477 chain_frame_size, // maxsegsize
1478 BUS_DMA_ALLOCNOW, // flags
1479 NULL, NULL, // lockfunc, lockarg
1480 &sc->chain_frame_tag)) {
1481 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1482 return (ENOMEM);
1483 }
1484 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1485 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1486 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1487 return (ENOMEM);
1488 }
1489 bzero(sc->chain_frame_mem, chain_frame_size);
1490 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1491 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1492 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1493 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1494 return (ENOMEM);
1495 }
1496
1497 /*
1498 * Allocate Reply Descriptor Array
1499 */
1500 reply_desc_size = sc->reply_alloc_sz;
1501 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1502 16, 0, // algnmnt, boundary
1503 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1504 BUS_SPACE_MAXADDR, // highaddr
1505 NULL, NULL, // filter, filterarg
1506 reply_desc_size, // maxsize
1507 1, // msegments
1508 reply_desc_size, // maxsegsize
1509 BUS_DMA_ALLOCNOW, // flags
1510 NULL, NULL, // lockfunc, lockarg
1511 &sc->reply_desc_tag)) {
1512 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1513 return (ENOMEM);
1514 }
1515 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1516 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1517 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1518 return (ENOMEM);
1519 }
1520 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1521 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1522 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1523 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1524 return (ENOMEM);
1525 }
1526
1527 /*
1528 * Allocate Sense Buffer Array. Keep in lower 4GB
1529 */
1530 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1531 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1532 64, 0, // algnmnt, boundary
1533 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1534 BUS_SPACE_MAXADDR, // highaddr
1535 NULL, NULL, // filter, filterarg
1536 sense_size, // maxsize
1537 1, // nsegments
1538 sense_size, // maxsegsize
1539 BUS_DMA_ALLOCNOW, // flags
1540 NULL, NULL, // lockfunc, lockarg
1541 &sc->sense_tag)) {
1542 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1543 return (ENOMEM);
1544 }
1545 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1546 BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1547 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1548 return (ENOMEM);
1549 }
1550 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1551 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1552 BUS_DMA_NOWAIT)){
1553 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1554 return (ENOMEM);
1555 }
1556
1557 /*
1558 * Allocate for Event detail structure
1559 */
1560 evt_detail_size = sizeof(struct mrsas_evt_detail);
1561 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1562 1, 0, // algnmnt, boundary
1563 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1564 BUS_SPACE_MAXADDR, // highaddr
1565 NULL, NULL, // filter, filterarg
1566 evt_detail_size, // maxsize
1567 1, // msegments
1568 evt_detail_size, // maxsegsize
1569 BUS_DMA_ALLOCNOW, // flags
1570 NULL, NULL, // lockfunc, lockarg
1571 &sc->evt_detail_tag)) {
1572 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1573 return (ENOMEM);
1574 }
1575 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1576 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1577 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1578 return (ENOMEM);
1579 }
1580 bzero(sc->evt_detail_mem, evt_detail_size);
1581 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1582 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1583 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1584 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1585 return (ENOMEM);
1586 }
1587
1588
1589 /*
1590 * Create a dma tag for data buffers; size will be the maximum
1591 * possible I/O size (280kB).
1592 */
1593 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1594 1, // alignment
1595 0, // boundary
1596 BUS_SPACE_MAXADDR, // lowaddr
1597 BUS_SPACE_MAXADDR, // highaddr
1598 NULL, NULL, // filter, filterarg
1599 MRSAS_MAX_IO_SIZE, // maxsize
1600 MRSAS_MAX_SGL, // nsegments
1601 MRSAS_MAX_IO_SIZE, // maxsegsize
1602 BUS_DMA_ALLOCNOW, // flags
1603 busdma_lock_mutex, // lockfunc
1604 &sc->io_lock, // lockfuncarg
1605 &sc->data_tag)) {
1606 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1607 return(ENOMEM);
1608 }
1609
1610 return(0);
1611}
1612
1613/*
1614 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1615 * input: callback argument,
1616 * machine dependent type that describes DMA segments,
1617 * number of segments,
1618 * error code.
1619 *
1620 * This function is for the driver to receive mapping information resultant
1621 * of the bus_dmamap_load(). The information is actually not being used,
1622 * but the address is saved anyway.
1623 */
1624void
1625mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1626{
1627 bus_addr_t *addr;
1628
1629 addr = arg;
1630 *addr = segs[0].ds_addr;
1631}
1632
1633/*
1634 * mrsas_setup_raidmap: Set up RAID map.
1635 * input: Adapter instance soft state
1636 *
1637 * Allocate DMA memory for the RAID maps and perform setup.
1638 */
1639static int mrsas_setup_raidmap(struct mrsas_softc *sc)
1640{
1641 int i;
1642
1643 sc->drv_supported_vd_count =
1644 MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
1645 sc->drv_supported_pd_count =
1646 MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
1647
1648 if(sc->max256vdSupport) {
1649 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
1650 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1651 } else {
1652 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
1653 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1654 }
1655
1656#if VD_EXT_DEBUG
1657 device_printf(sc->mrsas_dev, "FW supports: max256vdSupport = %s\n",
1658 sc->max256vdSupport ? "YES":"NO");
1659 device_printf(sc->mrsas_dev, "FW supports %dVDs %dPDs\n"
1660 "DRIVER supports %dVDs %dPDs \n",
1661 sc->fw_supported_vd_count, sc->fw_supported_pd_count,
1662 sc->drv_supported_vd_count, sc->drv_supported_pd_count);
1663#endif
1664
1665 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
1666 (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
1667 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
1668 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
1669 (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count-1));
1670
1671 for (i = 0; i < 2; i++) {
1672 sc->ld_drv_map[i] =
1673 (void*) malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1674 /* Do Error handling */
1675 if (!sc->ld_drv_map[i]) {
1676 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1677
1678 if (i == 1)
1679 free (sc->ld_drv_map[0], M_MRSAS);
1680 //ABORT driver initialization
1681 goto ABORT;
1682 }
1683 }
1684
1685 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
1686
1687 if(sc->max256vdSupport)
1688 sc->current_map_sz = sc->new_map_sz;
1689 else
1690 sc->current_map_sz = sc->old_map_sz;
1691
1692
1693 for (int i=0; i < 2; i++)
1694 {
1695 if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
1696 4, 0, // algnmnt, boundary
1697 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1698 BUS_SPACE_MAXADDR, // highaddr
1699 NULL, NULL, // filter, filterarg
1700 sc->max_map_sz, // maxsize
1701 1, // nsegments
1702 sc->max_map_sz, // maxsegsize
1703 BUS_DMA_ALLOCNOW, // flags
1704 NULL, NULL, // lockfunc, lockarg
1705 &sc->raidmap_tag[i])) {
1706 device_printf(sc->mrsas_dev,
1707 "Cannot allocate raid map tag.\n");
1708 return (ENOMEM);
1709 }
1710 if (bus_dmamem_alloc(sc->raidmap_tag[i],
1711 (void **)&sc->raidmap_mem[i],
1712 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1713 device_printf(sc->mrsas_dev,
1714 "Cannot allocate raidmap memory.\n");
1715 return (ENOMEM);
1716 }
1717
1718 bzero (sc->raidmap_mem[i], sc->max_map_sz);
1719
1720 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1721 sc->raidmap_mem[i], sc->max_map_sz,
1722 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1723 BUS_DMA_NOWAIT)){
1724 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1725 return (ENOMEM);
1726 }
1727 if (!sc->raidmap_mem[i]) {
1728 device_printf(sc->mrsas_dev,
1729 "Cannot allocate memory for raid map.\n");
1730 return (ENOMEM);
1731 }
1732 }
1733
1734 if (!mrsas_get_map_info(sc))
1735 mrsas_sync_map_info(sc);
1736
1737 return (0);
1738
1739ABORT:
1740 return (1);
1741}
1742
1743/**
1744 * mrsas_init_fw: Initialize Firmware
1745 * input: Adapter soft state
1746 *
1747 * Calls transition_to_ready() to make sure Firmware is in operational
1748 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1749 * Firmware. It issues internal commands to get the controller info
1750 * after the IOC_INIT command response is received by Firmware.
1751 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1752 * are currently not being used, it is left here as placeholder.
1753 */
1754static int mrsas_init_fw(struct mrsas_softc *sc)
1755{
1756 u_int32_t max_sectors_1;
1757 u_int32_t max_sectors_2;
1758 u_int32_t tmp_sectors;
1759 struct mrsas_ctrl_info *ctrl_info;
1760
1761 int ret, ocr = 0;
1762
1763
1764 /* Make sure Firmware is ready */
1765 ret = mrsas_transition_to_ready(sc, ocr);
1766 if (ret != SUCCESS) {
1767 return(ret);
1768 }
1769
1770 /* Get operational params, sge flags, send init cmd to ctlr */
1771 if (mrsas_init_adapter(sc) != SUCCESS){
1772 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1773 return(1);
1774 }
1775
1776 /* Allocate internal commands for pass-thru */
1777 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1778 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1779 return(1);
1780 }
1781
1782 /*
1783 * Get the controller info from FW, so that
1784 * the MAX VD support availability can be decided.
1785 */
1786 ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1787 if (!ctrl_info)
1788 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
1789
1790 if (mrsas_get_ctrl_info(sc, ctrl_info)) {
1791 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
1792 }
1793
1794 sc->max256vdSupport =
1795 (u_int8_t) ctrl_info->adapterOperations3.supportMaxExtLDs;
1796
1797 if (ctrl_info->max_lds > 64){
1798 sc->max256vdSupport = 1;
1799 }
1800
1801 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1802 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1803 return(1);
1804 }
1805
1806 /* For pass-thru, get PD/LD list and controller info */
1807 memset(sc->pd_list, 0,
1808 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1809 mrsas_get_pd_list(sc);
1810
1811 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
1812 mrsas_get_ld_list(sc);
1813
1814 /*
1815 * Compute the max allowed sectors per IO: The controller info has two
1816 * limits on max sectors. Driver should use the minimum of these two.
1817 *
1818 * 1 << stripe_sz_ops.min = max sectors per strip
1819 *
1820 * Note that older firmwares ( < FW ver 30) didn't report information
1821 * to calculate max_sectors_1. So the number ended up as zero always.
1822 */
1823 tmp_sectors = 0;
1824 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1825 ctrl_info->max_strips_per_io;
1826 max_sectors_2 = ctrl_info->max_request_size;
1827 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1828 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1829
1830 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1831 sc->max_sectors_per_req = tmp_sectors;
1832
1833 sc->disableOnlineCtrlReset =
1834 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1835 sc->UnevenSpanSupport =
1836 ctrl_info->adapterOperations2.supportUnevenSpans;
1837 if(sc->UnevenSpanSupport) {
1838 printf("FW supports: UnevenSpanSupport=%x\n\n",
1839 sc->UnevenSpanSupport);
1840
1841 if (MR_ValidateMapInfo(sc))
1842 sc->fast_path_io = 1;
1843 else
1844 sc->fast_path_io = 0;
1845 }
1846
1847 if (ctrl_info)
1848 free(ctrl_info, M_MRSAS);
1849
1850 return(0);
1851}
1852
1853/**
1854 * mrsas_init_adapter: Initializes the adapter/controller
1855 * input: Adapter soft state
1856 *
1857 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1858 * ROC/controller. The FW register is read to determined the number of
1859 * commands that is supported. All memory allocations for IO is based on
1860 * max_cmd. Appropriate calculations are performed in this function.
1861 */
1862int mrsas_init_adapter(struct mrsas_softc *sc)
1863{
1864 uint32_t status;
1865 u_int32_t max_cmd;
1866 int ret;
1867
1868 /* Read FW status register */
1869 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1870
1871 /* Get operational params from status register */
1872 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1873
1874 /* Decrement the max supported by 1, to correlate with FW */
1875 sc->max_fw_cmds = sc->max_fw_cmds-1;
1876 max_cmd = sc->max_fw_cmds;
1877
1878 /* Determine allocation size of command frames */
1879 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1880 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1881 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
1882 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1883 sc->chain_frames_alloc_sz = 1024 * max_cmd;
1884 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1885 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1886
1887 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1888 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1889
1890 /* Used for pass thru MFI frame (DCMD) */
1891 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1892
1893 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1894 sizeof(MPI2_SGE_IO_UNION))/16;
1895
1896 sc->last_reply_idx = 0;
1897
1898 ret = mrsas_alloc_mem(sc);
1899 if (ret != SUCCESS)
1900 return(ret);
1901
1902 ret = mrsas_alloc_mpt_cmds(sc);
1903 if (ret != SUCCESS)
1904 return(ret);
1905
1906 ret = mrsas_ioc_init(sc);
1907 if (ret != SUCCESS)
1908 return(ret);
1909
1910
1911 return(0);
1912}
1913
1914/**
1915 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1916 * input: Adapter soft state
1917 *
1918 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
1919 */
1920int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1921{
1922 int ioc_init_size;
1923
1924 /* Allocate IOC INIT command */
1925 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1926 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1927 1, 0, // algnmnt, boundary
1928 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1929 BUS_SPACE_MAXADDR, // highaddr
1930 NULL, NULL, // filter, filterarg
1931 ioc_init_size, // maxsize
1932 1, // msegments
1933 ioc_init_size, // maxsegsize
1934 BUS_DMA_ALLOCNOW, // flags
1935 NULL, NULL, // lockfunc, lockarg
1936 &sc->ioc_init_tag)) {
1937 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1938 return (ENOMEM);
1939 }
1940 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1941 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1942 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1943 return (ENOMEM);
1944 }
1945 bzero(sc->ioc_init_mem, ioc_init_size);
1946 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1947 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1948 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1949 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1950 return (ENOMEM);
1951 }
1952
1953 return (0);
1954}
1955
1956/**
1957 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command
1958 * input: Adapter soft state
1959 *
1960 * Deallocates memory of the IOC Init cmd.
1961 */
1962void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1963{
1964 if (sc->ioc_init_phys_mem)
1965 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1966 if (sc->ioc_init_mem != NULL)
1967 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1968 if (sc->ioc_init_tag != NULL)
1969 bus_dma_tag_destroy(sc->ioc_init_tag);
1970}
1971
/**
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input:  Adapter soft state
 * return: 0 on success, 1 on allocation failure, timeout or FW error
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 * The command is built as an MPI2 IOC_INIT request embedded at offset
 * 1024 of a DMAable buffer, wrapped in an MFI init frame at offset 0,
 * then fired at the FW and polled (busy-wait, 1 ms granularity) for a
 * completion status.  The DMA buffer is freed before returning on all
 * paths.
 */
int mrsas_ioc_init(struct mrsas_softc *sc)
{
    struct mrsas_init_frame *init_frame;
    pMpi2IOCInitRequest_t IOCInitMsg;
    MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
    u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
    bus_addr_t phys_addr;
    int i, retcode = 0;

    /* Allocate memory for the IOC INIT command */
    if (mrsas_alloc_ioc_cmd(sc)) {
        device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
        return(1);
    }

    /* MPI2 IOC_INIT payload lives 1024 bytes past the MFI frame. */
    IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
    IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
    IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
    IOCInitMsg->MsgVersion = MPI2_VERSION;
    IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
    /* Frame size and queue geometry, as computed in mrsas_init_adapter(). */
    IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
    IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
    IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
    IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

    /* MFI wrapper frame at offset 0; status 0xFF means "pending". */
    init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
    init_frame->cmd = MFI_CMD_INIT;
    init_frame->cmd_status = 0xFF;
    init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    /* Advertise the driver version string to FW, if the buffer exists. */
    if (sc->verbuf_mem) {
        snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
                MRSAS_VERSION);
        init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
        init_frame->driver_ver_hi = 0;
    }

    init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
    /* Point the MFI frame at the embedded MPI2 payload. */
    phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
    init_frame->queue_info_new_phys_addr_lo = phys_addr;
    init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

    /* Build the MFA-type request descriptor for the doorbell. */
    req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
    req_desc.MFAIo.RequestFlags =
        (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

    mrsas_disable_intr(sc);
    mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
    mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

    /*
     * Poll response timer to wait for Firmware response. While this
     * timer with the DELAY call could block CPU, the time interval for
     * this is only 1 millisecond.
     */
    if (init_frame->cmd_status == 0xFF) {
        for (i=0; i < (max_wait * 1000); i++){
            if (init_frame->cmd_status == 0xFF)
                DELAY(1000);
            else
                break;
        }
    }

    if (init_frame->cmd_status == 0)
        mrsas_dprint(sc, MRSAS_OCR,
              "IOC INIT response received from FW.\n");
    else
    {
        /* 0xFF still pending => timed out; anything else is a FW error. */
        if (init_frame->cmd_status == 0xFF)
            device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
        else
            device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
        retcode = 1;
    }

    mrsas_free_ioc_cmd(sc);
    return (retcode);
}
2059
/**
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input:  Adapter instance soft state
 * return: 0 on success, ENOMEM/FAIL on allocation failure
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
 * An array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 *
 * Each command is pointed at its private slice of the shared DMA regions
 * (IO request frames, chain frames, sense buffers) that were allocated
 * earlier; this function only carves offsets, it does not allocate DMA
 * memory itself.
 */
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
    int i, j;
    u_int32_t max_cmd;
    struct mrsas_mpt_cmd *cmd;
    pMpi2ReplyDescriptorsUnion_t reply_desc;
    u_int32_t offset, chain_offset, sense_offset;
    bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
    u_int8_t *io_req_base, *chain_frame_base, *sense_base;

    max_cmd = sc->max_fw_cmds;

    /* Request descriptor pool (one descriptor per command). */
    sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
    if (!sc->req_desc) {
        device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
        return(ENOMEM);
    }
    memset(sc->req_desc, 0, sc->request_alloc_sz);

    /*
     * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
     * dynamic array first and then allocate individual commands.
     */
    sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
    if (!sc->mpt_cmd_list) {
        device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
        return(ENOMEM);
    }
    memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
    for (i = 0; i < max_cmd; i++) {
        sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
                                 M_MRSAS, M_NOWAIT);
        if (!sc->mpt_cmd_list[i]) {
            /* Unwind every command allocated so far. */
            for (j = 0; j < i; j++)
                free(sc->mpt_cmd_list[j],M_MRSAS);
            free(sc->mpt_cmd_list, M_MRSAS);
            sc->mpt_cmd_list = NULL;
            return(ENOMEM);
        }
    }

    /* Base addresses of the shared DMA regions.  The IO request region
     * skips one frame: SMID 0 is reserved, commands are indexed from 1. */
    io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
    io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
    chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
    chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
    sense_base = (u_int8_t*)sc->sense_mem;
    sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
    for (i = 0; i < max_cmd; i++) {
        cmd = sc->mpt_cmd_list[i];
        /* Per-command offsets into each region (1 KB per chain frame). */
        offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
        chain_offset = 1024 * i;
        sense_offset = MRSAS_SENSE_LEN * i;
        memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
        cmd->index = i + 1;          /* SMID: 1-based */
        cmd->ccb_ptr = NULL;
        callout_init(&cmd->cm_callout, 0);
        cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;  /* no MFI cmd attached */
        cmd->sc = sc;
        cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
        memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
        cmd->io_request_phys_addr = io_req_base_phys + offset;
        cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
        cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
        cmd->sense = sense_base + sense_offset;
        cmd->sense_phys_addr = sense_base_phys + sense_offset;
        if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
            /* NOTE(review): earlier allocations are not unwound here;
             * presumably the caller's failure path / detach frees the
             * command list — confirm before relying on it. */
            return(FAIL);
        }
        TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
    }

    /* Initialize reply descriptor array to 0xFFFFFFFF */
    reply_desc = sc->reply_desc_mem;
    for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
        reply_desc->Words = MRSAS_ULONG_MAX;
    }
    return(0);
}
2148
2149/**
2150 * mrsas_fire_cmd: Sends command to FW
2151 * input: Adapter soft state
2152 * request descriptor address low
2153 * request descriptor address high
2154 *
2155 * This functions fires the command to Firmware by writing to the
2156 * inbound_low_queue_port and inbound_high_queue_port.
2157 */
2158void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2159 u_int32_t req_desc_hi)
2160{
2161 mtx_lock(&sc->pci_lock);
2162 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2163 req_desc_lo);
2164 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2165 req_desc_hi);
2166 mtx_unlock(&sc->pci_lock);
2167}
2168
/**
 * mrsas_transition_to_ready: Move FW to Ready state
 * input:  Adapter instance soft state
 *         ocr - non-zero when called from OCR path (FAULT is then
 *               recoverable instead of fatal)
 * return: 0 when FW reaches READY, -ENODEV on unrecoverable state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.  Each intermediate state is given at
 * most MRSAS_RESET_WAIT_TIME seconds (polled at 1 ms) to change before
 * the transition is declared failed.
 */
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
    int i;
    u_int8_t max_wait;
    u_int32_t val, fw_state;
    u_int32_t cur_state;
    u_int32_t abs_state, curr_abs_state;

    /* FW state lives in the low bits of the outbound scratch pad. */
    val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
    fw_state = val & MFI_STATE_MASK;
    max_wait = MRSAS_RESET_WAIT_TIME;

    if (fw_state != MFI_STATE_READY)
        device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

    while (fw_state != MFI_STATE_READY) {
        /* Snapshot the full (unmasked) register to detect any change. */
        abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
        switch (fw_state) {
            case MFI_STATE_FAULT:
                device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
                /* FAULT is only recoverable from the OCR path; during
                 * normal init it is fatal.  Both branches leave the
                 * switch, so there is no fallthrough despite the
                 * missing trailing break. */
                if (ocr) {
                    cur_state = MFI_STATE_FAULT;
                    break;
                }
                else
                    return -ENODEV;
            case MFI_STATE_WAIT_HANDSHAKE:
                /* Set the CLR bit in inbound doorbell */
                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
                      MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
                cur_state = MFI_STATE_WAIT_HANDSHAKE;
                break;
            case MFI_STATE_BOOT_MESSAGE_PENDING:
                /* Acknowledge the boot message so FW can proceed. */
                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
                      MFI_INIT_HOTPLUG);
                cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
                break;
            case MFI_STATE_OPERATIONAL:
                /* Bring it to READY state; assuming max wait 10 secs */
                mrsas_disable_intr(sc);
                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
                /* Wait for the doorbell busy bit (bit 0) to clear. */
                for (i=0; i < max_wait * 1000; i++) {
                    if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
                        DELAY(1000);
                    else
                        break;
                }
                cur_state = MFI_STATE_OPERATIONAL;
                break;
            case MFI_STATE_UNDEFINED:
                /* This state should not last for more than 2 seconds */
                cur_state = MFI_STATE_UNDEFINED;
                break;
            case MFI_STATE_BB_INIT:
                cur_state = MFI_STATE_BB_INIT;
                break;
            case MFI_STATE_FW_INIT:
                cur_state = MFI_STATE_FW_INIT;
                break;
            case MFI_STATE_FW_INIT_2:
                cur_state = MFI_STATE_FW_INIT_2;
                break;
            case MFI_STATE_DEVICE_SCAN:
                cur_state = MFI_STATE_DEVICE_SCAN;
                break;
            case MFI_STATE_FLUSH_CACHE:
                cur_state = MFI_STATE_FLUSH_CACHE;
                break;
            default:
                device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
                return -ENODEV;
        }

        /*
         * The cur_state should not last for more than max_wait secs
         */
        for (i = 0; i < (max_wait * 1000); i++) {
            fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                    outbound_scratch_pad))& MFI_STATE_MASK);
            curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                    outbound_scratch_pad));
            /* Compare the full register value, not just the masked
             * state, so any FW progress breaks the wait. */
            if (abs_state == curr_abs_state)
                DELAY(1000);
            else
                break;
        }

        /*
         * Return error if fw_state hasn't changed after max_wait
         */
        if (curr_abs_state == abs_state) {
            device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
                    "in %d secs\n", fw_state, max_wait);
            return -ENODEV;
        }
    }
    mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
    return 0;
}
2278
2279/**
2280 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2281 * input: Adapter soft state
2282 *
2283 * This function removes an MFI command from the command list.
2284 */
2285struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2286{
2287 struct mrsas_mfi_cmd *cmd = NULL;
2288
2289 mtx_lock(&sc->mfi_cmd_pool_lock);
2290 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2291 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2292 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2293 }
2294 mtx_unlock(&sc->mfi_cmd_pool_lock);
2295
2296 return cmd;
2297}
2298
/**
 * mrsas_ocr_thread:     Thread to handle OCR/Kill Adapter.
 * input:                Adapter Context.
 *
 * This function will check FW status register and flag
 * do_timeout_reset flag. It will do OCR/Kill adapter if
 * FW is in fault state or IO timed out has trigger reset.
 *
 * Runs as a kernel process: wakes up every mrsas_fw_fault_check_delay
 * seconds (or when signalled on ocr_chan), and exits when
 * remove_in_progress is set by detach.  sim_lock is held across the
 * loop; msleep() drops it while sleeping.
 */
static void
mrsas_ocr_thread(void *arg)
{
    struct mrsas_softc *sc;
    u_int32_t fw_status, fw_state;

    sc = (struct mrsas_softc *)arg;

    mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

    sc->ocr_thread_active = 1;
    mtx_lock(&sc->sim_lock);
    for (;;) {
        /* Sleep for 1 second and check the queue status*/
        msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
               "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
        if (sc->remove_in_progress) {
            mrsas_dprint(sc, MRSAS_OCR,
                "Exit due to shutdown from %s\n", __func__);
            break;
        }
        /* Poll the FW state from the scratch pad register. */
        fw_status = mrsas_read_reg(sc,
                offsetof(mrsas_reg_set, outbound_scratch_pad));
        fw_state = fw_status & MFI_STATE_MASK;
        if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
            device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
                 sc->do_timedout_reset?"IO Timeout":
                 "FW fault detected");
            /* ioctl_lock (spin) guards the reset bookkeeping against
             * concurrent ioctl paths. */
            mtx_lock_spin(&sc->ioctl_lock);
            sc->reset_in_progress = 1;
            sc->reset_count++;
            mtx_unlock_spin(&sc->ioctl_lock);
            /* Freeze CAM while the controller is reset. */
            mrsas_xpt_freeze(sc);
            mrsas_reset_ctrl(sc);
            mrsas_xpt_release(sc);
            sc->reset_in_progress = 0;
            sc->do_timedout_reset = 0;
        }
    }
    mtx_unlock(&sc->sim_lock);
    sc->ocr_thread_active = 0;
    mrsas_kproc_exit(0);
}
2350
2351/**
2352 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2353 * input: Adapter Context.
2354 *
2355 * This function will clear reply descriptor so that post OCR
2356 * driver and FW will lost old history.
2357 */
2358void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2359{
2360 int i;
2361 pMpi2ReplyDescriptorsUnion_t reply_desc;
2362
2363 sc->last_reply_idx = 0;
2364 reply_desc = sc->reply_desc_mem;
2365 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2366 reply_desc->Words = MRSAS_ULONG_MAX;
2367 }
2368}
2369
/**
 * mrsas_reset_ctrl:     Core function to OCR/Kill adapter.
 * input:                Adapter Context.
 * return:               SUCCESS on completed reset, FAIL when the
 *                       adapter had to be killed.
 *
 * This function will run from thread context so that it can sleep.
 * 1. Do not handle OCR if FW is in HW critical error.
 * 2. Wait for outstanding command to complete for 180 seconds.
 * 3. If #2 does not find any outstanding command Controller is in working
 *    state, so skip OCR.
 *    Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
 *    ccb_ptr.
 * 5. Post OCR, Re-fire Management command and move Controller to Operation
 *    state.
 */
int mrsas_reset_ctrl(struct mrsas_softc *sc)
{
    int retval = SUCCESS, i, j, retry = 0;
    u_int32_t host_diag, abs_state, status_reg, reset_adapter;
    union ccb *ccb;
    struct mrsas_mfi_cmd *mfi_cmd;
    struct mrsas_mpt_cmd *mpt_cmd;
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
        device_printf(sc->mrsas_dev,
                        "mrsas: Hardware critical error, returning FAIL.\n");
        return FAIL;
    }

    /* Mark reset in progress and quiesce: no new commands are fired
     * while MRSAS_FUSION_IN_RESET is set. */
    set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
    sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
    mrsas_disable_intr(sc);
    DELAY(1000 * 1000);

    /* First try waiting for commands to complete */
    if (mrsas_wait_for_outstanding(sc)) {
        mrsas_dprint(sc, MRSAS_OCR,
                     "resetting adapter from %s.\n",
                      __func__);
        /* Now return commands back to the CAM layer */
        for (i = 0 ; i < sc->max_fw_cmds; i++) {
            mpt_cmd = sc->mpt_cmd_list[i];
            if (mpt_cmd->ccb_ptr) {
                ccb = (union ccb *)(mpt_cmd->ccb_ptr);
                ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
                mrsas_cmd_done(sc, mpt_cmd);
                atomic_dec(&sc->fw_outstanding);
            }
        }

        status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                                 outbound_scratch_pad));
        abs_state = status_reg & MFI_STATE_MASK;
        reset_adapter = status_reg & MFI_RESET_ADAPTER;
        if (sc->disableOnlineCtrlReset ||
                (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
            /* Reset not supported, kill adapter */
            mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
            mrsas_kill_hba(sc);
            sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
            retval = FAIL;
            goto out;
        }

        /* Now try to reset the chip */
        for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
            /* Diag-reset unlock: the 7-key magic write sequence must be
             * issued in exactly this order. */
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_FLUSH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_1ST_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_2ND_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_3RD_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_4TH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_5TH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_6TH_KEY_VALUE);

            /* Check that the diag write enable (DRWE) bit is on */
            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                                    fusion_host_diag));
            retry = 0;
            while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
                DELAY(100 * 1000);
                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                                        fusion_host_diag));
                if (retry++ == 100) {
                    mrsas_dprint(sc, MRSAS_OCR,
                                "Host diag unlock failed!\n");
                    break;
                }
            }
            if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
                continue;       /* unlock failed, retry whole sequence */

            /* Send chip reset command */
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
                host_diag | HOST_DIAG_RESET_ADAPTER);
            DELAY(3000 * 1000);

            /* Make sure reset adapter bit is cleared */
            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                                    fusion_host_diag));
            retry = 0;
            while (host_diag & HOST_DIAG_RESET_ADAPTER) {
                DELAY(100 * 1000);
                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                                        fusion_host_diag));
                if (retry++ == 1000) {
                    mrsas_dprint(sc, MRSAS_OCR,
                                 "Diag reset adapter never cleared!\n");
                    break;
                }
            }
            if (host_diag & HOST_DIAG_RESET_ADAPTER)
                continue;       /* chip never came out of reset, retry */

            /* Wait (up to ~100 s) for FW to move past its early boot
             * states before attempting the READY transition. */
            abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                       outbound_scratch_pad)) & MFI_STATE_MASK;
            retry = 0;

            while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
                DELAY(100 * 1000);
                abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                                           outbound_scratch_pad)) & MFI_STATE_MASK;
            }
            if (abs_state <= MFI_STATE_FW_INIT) {
                mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
                             " state = 0x%x\n", abs_state);
                continue;
            }

            /* Wait for FW to become ready */
            if (mrsas_transition_to_ready(sc, 1)) {
                mrsas_dprint(sc, MRSAS_OCR,
                            "mrsas: Failed to transition controller to ready.\n");
                continue;
            }

            /* Re-initialize: clean reply queue, then IOC INIT again. */
            mrsas_reset_reply_desc(sc);
            if (mrsas_ioc_init(sc)) {
                mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
                continue;
            }

            clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
            mrsas_enable_intr(sc);
            sc->adprecovery = MRSAS_HBA_OPERATIONAL;

            /* Re-fire management commands */
            for (j = 0 ; j < sc->max_fw_cmds; j++) {
                mpt_cmd = sc->mpt_cmd_list[j];
                if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
                    mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
                    /* A pending map-get DCMD is obsolete after reset:
                     * release it instead of re-firing (a fresh sync is
                     * issued below). */
                    if (mfi_cmd->frame->dcmd.opcode ==
                                          MR_DCMD_LD_MAP_GET_INFO) {
                        mrsas_release_mfi_cmd(mfi_cmd);
                        mrsas_release_mpt_cmd(mpt_cmd);
                    } else  {
                        req_desc = mrsas_get_request_desc(sc,
                            mfi_cmd->cmd_id.context.smid - 1);
                        mrsas_dprint(sc, MRSAS_OCR,
                            "Re-fire command DCMD opcode 0x%x index %d\n ",
                            mfi_cmd->frame->dcmd.opcode, j);
                        if (!req_desc)
                            device_printf(sc->mrsas_dev,
                                          "Cannot build MPT cmd.\n");
                        else
                            mrsas_fire_cmd(sc, req_desc->addr.u.low,
                                               req_desc->addr.u.high);
                    }
                }
            }

            /* Reset load balance info */
            memset(sc->load_balance_info, 0,
                   sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

            /* Refresh the RAID map after the controller came back. */
            if (!mrsas_get_map_info(sc))
                mrsas_sync_map_info(sc);

            /* Adapter reset completed successfully */
            device_printf(sc->mrsas_dev, "Reset successful\n");
            retval = SUCCESS;
            goto out;
        }
        /* Reset failed, kill the adapter */
        device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
        mrsas_kill_hba(sc);
        retval = FAIL;
    } else {
        /* All commands drained: controller is healthy, skip the reset. */
        clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
        mrsas_enable_intr(sc);
        sc->adprecovery = MRSAS_HBA_OPERATIONAL;
    }
out:
    clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
    mrsas_dprint(sc, MRSAS_OCR,
            "Reset Exit with %d.\n", retval);
    return retval;
}
2575
2576/**
2577 * mrsas_kill_hba Kill HBA when OCR is not supported.
2578 * input: Adapter Context.
2579 *
2580 * This function will kill HBA when OCR is not supported.
2581 */
2582void mrsas_kill_hba (struct mrsas_softc *sc)
2583{
2584 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2585 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2586 MFI_STOP_ADP);
2587 /* Flush */
2588 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2589}
2590
2591/**
2592 * mrsas_wait_for_outstanding Wait for outstanding commands
2593 * input: Adapter Context.
2594 *
2595 * This function will wait for 180 seconds for outstanding
2596 * commands to be completed.
2597 */
2598int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2599{
2600 int i, outstanding, retval = 0;
2601 u_int32_t fw_state;
2602
2603 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2604 if (sc->remove_in_progress) {
2605 mrsas_dprint(sc, MRSAS_OCR,
2606 "Driver remove or shutdown called.\n");
2607 retval = 1;
2608 goto out;
2609 }
2610 /* Check if firmware is in fault state */
2611 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2612 outbound_scratch_pad)) & MFI_STATE_MASK;
2613 if (fw_state == MFI_STATE_FAULT) {
2614 mrsas_dprint(sc, MRSAS_OCR,
2615 "Found FW in FAULT state, will reset adapter.\n");
2616 retval = 1;
2617 goto out;
2618 }
2619 outstanding = atomic_read(&sc->fw_outstanding);
2620 if (!outstanding)
2621 goto out;
2622
2623 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2624 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2625 "commands to complete\n",i,outstanding);
2626 mrsas_complete_cmd(sc);
2627 }
2628 DELAY(1000 * 1000);
2629 }
2630
2631 if (atomic_read(&sc->fw_outstanding)) {
2632 mrsas_dprint(sc, MRSAS_OCR,
2633 " pending commands remain after waiting,"
2634 " will reset adapter.\n");
2635 retval = 1;
2636 }
2637out:
2638 return retval;
2639}
2640
2641/**
2642 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2643 * input: Command packet for return to free cmd pool
2644 *
2645 * This function returns the MFI command to the command list.
2646 */
2647void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2648{
2649 struct mrsas_softc *sc = cmd->sc;
2650
2651 mtx_lock(&sc->mfi_cmd_pool_lock);
2652 cmd->ccb_ptr = NULL;
2653 cmd->cmd_id.frame_count = 0;
2654 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2655 mtx_unlock(&sc->mfi_cmd_pool_lock);
2656
2657 return;
2658}
2659
/**
 * mrsas_get_ctrl_info:  Returns FW's controller structure
 * input:                Adapter soft state
 *                       Controller information structure
 * return:               0 on success, non-zero on failure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.  The DCMD is issued polled; on success the
 * result is copied out of the pre-allocated DMA bounce buffer into the
 * caller-supplied ctrl_info.
 */
static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
                struct mrsas_ctrl_info *ctrl_info)
{
    int retcode = 0;
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    cmd = mrsas_get_mfi_cmd(sc);

    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
        return -ENOMEM;
    }
    dcmd = &cmd->frame->dcmd;

    /* DMAable bounce buffer for the FW to write the info into. */
    if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
        mrsas_release_mfi_cmd(cmd);
        return -ENOMEM;
    }
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* Build the MR_DCMD_CTRL_GET_INFO frame: single read SGE pointing
     * at the bounce buffer, status preset to 0xFF (= pending). */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0xFF;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_READ;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
    dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
    dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

    if (!mrsas_issue_polled(sc, cmd))
        memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
    else
        retcode = 1;

    mrsas_free_ctlr_info_cmd(sc);
    mrsas_release_mfi_cmd(cmd);
    return(retcode);
}
2711
2712/**
2713 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2714 * input: Adapter soft state
2715 *
2716 * Allocates DMAable memory for the controller info internal command.
2717 */
2718int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2719{
2720 int ctlr_info_size;
2721
2722 /* Allocate get controller info command */
2723 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2724 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2725 1, 0, // algnmnt, boundary
2726 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2727 BUS_SPACE_MAXADDR, // highaddr
2728 NULL, NULL, // filter, filterarg
2729 ctlr_info_size, // maxsize
2730 1, // msegments
2731 ctlr_info_size, // maxsegsize
2732 BUS_DMA_ALLOCNOW, // flags
2733 NULL, NULL, // lockfunc, lockarg
2734 &sc->ctlr_info_tag)) {
2735 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2736 return (ENOMEM);
2737 }
2738 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2739 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2740 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2741 return (ENOMEM);
2742 }
2743 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2744 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2745 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2746 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2747 return (ENOMEM);
2748 }
2749
2750 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2751 return (0);
2752}
2753
2754/**
2755 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2756 * input: Adapter soft state
2757 *
2758 * Deallocates memory of the get controller info cmd.
2759 */
2760void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2761{
2762 if (sc->ctlr_info_phys_addr)
2763 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2764 if (sc->ctlr_info_mem != NULL)
2765 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2766 if (sc->ctlr_info_tag != NULL)
2767 bus_dma_tag_destroy(sc->ctlr_info_tag);
2768}
2769
2770/**
2771 * mrsas_issue_polled: Issues a polling command
2772 * inputs: Adapter soft state
2773 * Command packet to be issued
2774 *
2775 * This function is for posting of internal commands to Firmware. MFI
2776 * requires the cmd_status to be set to 0xFF before posting. The maximun
2777 * wait time of the poll response timer is 180 seconds.
2778 */
2779int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2780{
2781 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2782 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2783 int i, retcode = 0;
2784
2785 frame_hdr->cmd_status = 0xFF;
2786 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2787
2788 /* Issue the frame using inbound queue port */
2789 if (mrsas_issue_dcmd(sc, cmd)) {
2790 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2791 return(1);
2792 }
2793
2794 /*
2795 * Poll response timer to wait for Firmware response. While this
2796 * timer with the DELAY call could block CPU, the time interval for
2797 * this is only 1 millisecond.
2798 */
2799 if (frame_hdr->cmd_status == 0xFF) {
2800 for (i=0; i < (max_wait * 1000); i++){
2801 if (frame_hdr->cmd_status == 0xFF)
2802 DELAY(1000);
2803 else
2804 break;
2805 }
2806 }
2807 if (frame_hdr->cmd_status != 0)
2808 {
2809 if (frame_hdr->cmd_status == 0xFF)
2810 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2811 else
2812 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2813 retcode = 1;
2814 }
2815 return(retcode);
2816}
2817
2818/**
2819 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2820 * input: Adapter soft state
2821 * mfi cmd pointer
2822 *
2823 * This function is called by mrsas_issued_blocked_cmd() and
2824 * mrsas_issued_polled(), to build the MPT command and then fire the
2825 * command to Firmware.
2826 */
2827int
2828mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2829{
2830 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2831
2832 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2833 if (!req_desc) {
2834 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2835 return(1);
2836 }
2837
2838 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2839
2840 return(0);
2841}
2842
2843/**
2844 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2845 * input: Adapter soft state
2846 * mfi cmd to build
2847 *
2848 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2849 * passthru command and prepares the MPT command to send to Firmware.
2850 */
2851MRSAS_REQUEST_DESCRIPTOR_UNION *
2852mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2853{
2854 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2855 u_int16_t index;
2856
2857 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2858 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2859 return NULL;
2860 }
2861
2862 index = cmd->cmd_id.context.smid;
2863
2864 req_desc = mrsas_get_request_desc(sc, index-1);
2865 if(!req_desc)
2866 return NULL;
2867
2868 req_desc->addr.Words = 0;
2869 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2870
2871 req_desc->SCSIIO.SMID = index;
2872
2873 return(req_desc);
2874}
2875
2876/**
2877 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2878 * input: Adapter soft state
2879 * mfi cmd pointer
2880 *
2881 * The MPT command and the io_request are setup as a passthru command.
2882 * The SGE chain address is set to frame_phys_addr of the MFI command.
2883 */
2884u_int8_t
2885mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2886{
2887 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2888 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2889 struct mrsas_mpt_cmd *mpt_cmd;
2890 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2891
2892 mpt_cmd = mrsas_get_mpt_cmd(sc);
2893 if (!mpt_cmd)
2894 return(1);
2895
2896 /* Save the smid. To be used for returning the cmd */
2897 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2898
2899 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2900
2901 /*
2902 * For cmds where the flag is set, store the flag and check
2903 * on completion. For cmds with this flag, don't call
2904 * mrsas_complete_cmd.
2905 */
2906
2907 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2908 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2909
2910 io_req = mpt_cmd->io_request;
2911
2912 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2913 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2914 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2915 sgl_ptr_end->Flags = 0;
2916 }
2917
2918 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2919
2920 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2921 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2922 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
2923
2924 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2925
2926 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2927 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2928
2929 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2930
2931 return(0);
2932}
2933
2934/**
2935 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2936 * input: Adapter soft state
2937 * Command to be issued
2938 *
2939 * This function waits on an event for the command to be returned
2940 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2941 * Used for issuing internal and ioctl commands.
2942 */
2943int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2944{
2945 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2946 unsigned long total_time = 0;
2947 int retcode = 0;
2948
2949 /* Initialize cmd_status */
2950 cmd->cmd_status = ECONNREFUSED;
2951
2952 /* Build MPT-MFI command for issue to FW */
2953 if (mrsas_issue_dcmd(sc, cmd)){
2954 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2955 return(1);
2956 }
2957
2958 sc->chan = (void*)&cmd;
2959
2960 /* The following is for debug only... */
2961 //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
2962 //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
2963
2964 while (1) {
2965 if (cmd->cmd_status == ECONNREFUSED){
2966 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
2967 }
2968 else
2969 break;
2970 total_time++;
2971 if (total_time >= max_wait) {
2972 device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
2973 retcode = 1;
2974 break;
2975 }
2976 }
2977 return(retcode);
2978}
2979
2980/**
2981 * mrsas_complete_mptmfi_passthru - Completes a command
2982 * input: sc: Adapter soft state
2983 * cmd: Command to be completed
2984 * status: cmd completion status
2985 *
2986 * This function is called from mrsas_complete_cmd() after an interrupt
2987 * is received from Firmware, and io_request->Function is
2988 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
2989 */
2990void
2991mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2992 u_int8_t status)
2993{
2994 struct mrsas_header *hdr = &cmd->frame->hdr;
2995 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2996
2997 /* Reset the retry counter for future re-tries */
2998 cmd->retry_for_fw_reset = 0;
2999
3000 if (cmd->ccb_ptr)
3001 cmd->ccb_ptr = NULL;
3002
3003 switch (hdr->cmd) {
3004 case MFI_CMD_INVALID:
3005 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3006 break;
3007 case MFI_CMD_PD_SCSI_IO:
3008 case MFI_CMD_LD_SCSI_IO:
3009 /*
3010 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3011 * issued either through an IO path or an IOCTL path. If it
3012 * was via IOCTL, we will send it to internal completion.
3013 */
3014 if (cmd->sync_cmd) {
3015 cmd->sync_cmd = 0;
3016 mrsas_wakeup(sc, cmd);
3017 break;
3018 }
3019 case MFI_CMD_SMP:
3020 case MFI_CMD_STP:
3021 case MFI_CMD_DCMD:
3022 /* Check for LD map update */
3023 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3024 (cmd->frame->dcmd.mbox.b[1] == 1)) {
3025 sc->fast_path_io = 0;
3026 mtx_lock(&sc->raidmap_lock);
3027 if (cmd_status != 0) {
3028 if (cmd_status != MFI_STAT_NOT_FOUND)
3029 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
3030 else {
3031 mrsas_release_mfi_cmd(cmd);
3032 mtx_unlock(&sc->raidmap_lock);
3033 break;
3034 }
3035 }
3036 else
3037 sc->map_id++;
3038 mrsas_release_mfi_cmd(cmd);
3039 if (MR_ValidateMapInfo(sc))
3040 sc->fast_path_io = 0;
3041 else
3042 sc->fast_path_io = 1;
3043 mrsas_sync_map_info(sc);
3044 mtx_unlock(&sc->raidmap_lock);
3045 break;
3046 }
3047#if 0 //currently not supporting event handling, so commenting out
3048 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3049 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3050 mrsas_poll_wait_aen = 0;
3051 }
3052#endif
3053 /* See if got an event notification */
3054 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3055 mrsas_complete_aen(sc, cmd);
3056 else
3057 mrsas_wakeup(sc, cmd);
3058 break;
3059 case MFI_CMD_ABORT:
3060 /* Command issued to abort another cmd return */
3061 mrsas_complete_abort(sc, cmd);
3062 break;
3063 default:
3064 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
3065 break;
3066 }
3067}
3068
3069/**
3070 * mrsas_wakeup - Completes an internal command
3071 * input: Adapter soft state
3072 * Command to be completed
3073 *
3074 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3075 * a wait timer is started. This function is called from
3076 * mrsas_complete_mptmfi_passthru() as it completes the command,
3077 * to wake up from the command wait.
3078 */
3079void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3080{
3081 cmd->cmd_status = cmd->frame->io.cmd_status;
3082
3083 if (cmd->cmd_status == ECONNREFUSED)
3084 cmd->cmd_status = 0;
3085
3086 /* For debug only ... */
3087 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3088
3089 sc->chan = (void*)&cmd;
3090 wakeup_one((void *)&sc->chan);
3091 return;
3092}
3093
3094/**
3095 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3096 * input: Adapter soft state
3097 * Shutdown/Hibernate
3098 *
3099 * This function issues a DCMD internal command to Firmware to initiate
3100 * shutdown of the controller.
3101 */
3102static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3103{
3104 struct mrsas_mfi_cmd *cmd;
3105 struct mrsas_dcmd_frame *dcmd;
3106
3107 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3108 return;
3109
3110 cmd = mrsas_get_mfi_cmd(sc);
3111 if (!cmd) {
3112 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
3113 return;
3114 }
3115
3116 if (sc->aen_cmd)
3117 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3118
3119 if (sc->map_update_cmd)
3120 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3121
3122 dcmd = &cmd->frame->dcmd;
3123 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3124
3125 dcmd->cmd = MFI_CMD_DCMD;
3126 dcmd->cmd_status = 0x0;
3127 dcmd->sge_count = 0;
3128 dcmd->flags = MFI_FRAME_DIR_NONE;
3129 dcmd->timeout = 0;
3130 dcmd->pad_0 = 0;
3131 dcmd->data_xfer_len = 0;
3132 dcmd->opcode = opcode;
3133
3134 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3135
3136 mrsas_issue_blocked_cmd(sc, cmd);
3137 mrsas_release_mfi_cmd(cmd);
3138
3139 return;
3140}
3141
3142/**
3143 * mrsas_flush_cache: Requests FW to flush all its caches
3144 * input: Adapter soft state
3145 *
3146 * This function is issues a DCMD internal command to Firmware to initiate
3147 * flushing of all caches.
3148 */
3149static void mrsas_flush_cache(struct mrsas_softc *sc)
3150{
3151 struct mrsas_mfi_cmd *cmd;
3152 struct mrsas_dcmd_frame *dcmd;
3153
3154 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3155 return;
3156
3157 cmd = mrsas_get_mfi_cmd(sc);
3158 if (!cmd) {
3159 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3160 return;
3161 }
3162
3163 dcmd = &cmd->frame->dcmd;
3164 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3165
3166 dcmd->cmd = MFI_CMD_DCMD;
3167 dcmd->cmd_status = 0x0;
3168 dcmd->sge_count = 0;
3169 dcmd->flags = MFI_FRAME_DIR_NONE;
3170 dcmd->timeout = 0;
3171 dcmd->pad_0 = 0;
3172 dcmd->data_xfer_len = 0;
3173 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3174 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3175
3176 mrsas_issue_blocked_cmd(sc, cmd);
3177 mrsas_release_mfi_cmd(cmd);
3178
3179 return;
3180}
3181
3182/**
3183 * mrsas_get_map_info: Load and validate RAID map
3184 * input: Adapter instance soft state
3185 *
3186 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3187 * to load and validate RAID map. It returns 0 if successful, 1 other-
3188 * wise.
3189 */
3190static int mrsas_get_map_info(struct mrsas_softc *sc)
3191{
3192 uint8_t retcode = 0;
3193
3194 sc->fast_path_io = 0;
3195 if (!mrsas_get_ld_map_info(sc)) {
3196 retcode = MR_ValidateMapInfo(sc);
3197 if (retcode == 0) {
3198 sc->fast_path_io = 1;
3199 return 0;
3200 }
3201 }
3202 return 1;
3203}
3204
3205/**
3206 * mrsas_get_ld_map_info: Get FW's ld_map structure
3207 * input: Adapter instance soft state
3208 *
3209 * Issues an internal command (DCMD) to get the FW's controller PD
3210 * list structure.
3211 */
3212static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3213{
3214 int retcode = 0;
3215 struct mrsas_mfi_cmd *cmd;
3216 struct mrsas_dcmd_frame *dcmd;
3217 void *map;
3218 bus_addr_t map_phys_addr = 0;
3219
3220 cmd = mrsas_get_mfi_cmd(sc);
3221 if (!cmd) {
3222 device_printf(sc->mrsas_dev,
3223 "Cannot alloc for ld map info cmd.\n");
3224 return 1;
3225 }
3226
3227 dcmd = &cmd->frame->dcmd;
3228
3229 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3230 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3231 if (!map) {
3232 device_printf(sc->mrsas_dev,
3233 "Failed to alloc mem for ld map info.\n");
3234 mrsas_release_mfi_cmd(cmd);
3235 return (ENOMEM);
3236 }
3237 memset(map, 0, sizeof(sc->max_map_sz));
3238 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3239
3240 dcmd->cmd = MFI_CMD_DCMD;
3241 dcmd->cmd_status = 0xFF;
3242 dcmd->sge_count = 1;
3243 dcmd->flags = MFI_FRAME_DIR_READ;
3244 dcmd->timeout = 0;
3245 dcmd->pad_0 = 0;
3246 dcmd->data_xfer_len = sc->current_map_sz;
3247 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3248 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3249 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3250
3251 if (!mrsas_issue_polled(sc, cmd))
3252 retcode = 0;
3253 else
3254 {
3255 device_printf(sc->mrsas_dev,
3256 "Fail to send get LD map info cmd.\n");
3257 retcode = 1;
3258 }
3259 mrsas_release_mfi_cmd(cmd);
3260
3261 return(retcode);
3262}
3263
3264/**
3265 * mrsas_sync_map_info: Get FW's ld_map structure
3266 * input: Adapter instance soft state
3267 *
3268 * Issues an internal command (DCMD) to get the FW's controller PD
3269 * list structure.
3270 */
3271static int mrsas_sync_map_info(struct mrsas_softc *sc)
3272{
3273 int retcode = 0, i;
3274 struct mrsas_mfi_cmd *cmd;
3275 struct mrsas_dcmd_frame *dcmd;
3276 uint32_t size_sync_info, num_lds;
3277 MR_LD_TARGET_SYNC *target_map = NULL;
3278 MR_DRV_RAID_MAP_ALL *map;
3279 MR_LD_RAID *raid;
3280 MR_LD_TARGET_SYNC *ld_sync;
3281 bus_addr_t map_phys_addr = 0;
3282
3283 cmd = mrsas_get_mfi_cmd(sc);
3284 if (!cmd) {
3285 device_printf(sc->mrsas_dev,
3286 "Cannot alloc for sync map info cmd\n");
3287 return 1;
3288 }
3289
3290 map = sc->ld_drv_map[sc->map_id & 1];
3291 num_lds = map->raidMap.ldCount;
3292
3293 dcmd = &cmd->frame->dcmd;
3294 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3295 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3296
3297 target_map =
3298 (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3299 memset(target_map, 0, sc->max_map_sz);
3300
3301 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3302
3303 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3304
3305 for (i = 0; i < num_lds; i++, ld_sync++) {
3306 raid = MR_LdRaidGet(i, map);
3307 ld_sync->targetId = MR_GetLDTgtId(i, map);
3308 ld_sync->seqNum = raid->seqNum;
3309 }
3310
3311 dcmd->cmd = MFI_CMD_DCMD;
3312 dcmd->cmd_status = 0xFF;
3313 dcmd->sge_count = 1;
3314 dcmd->flags = MFI_FRAME_DIR_WRITE;
3315 dcmd->timeout = 0;
3316 dcmd->pad_0 = 0;
3317 dcmd->data_xfer_len = sc->current_map_sz;
3318 dcmd->mbox.b[0] = num_lds;
3319 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3320 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3321 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3322 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3323
3324 sc->map_update_cmd = cmd;
3325 if (mrsas_issue_dcmd(sc, cmd)) {
3326 device_printf(sc->mrsas_dev,
3327 "Fail to send sync map info command.\n");
3328 return(1);
3329 }
3330 return(retcode);
3331}
3332
3333/**
3334 * mrsas_get_pd_list: Returns FW's PD list structure
3335 * input: Adapter soft state
3336 *
3337 * Issues an internal command (DCMD) to get the FW's controller PD
3338 * list structure. This information is mainly used to find out about
3339 * system supported by Firmware.
3340 */
3341static int mrsas_get_pd_list(struct mrsas_softc *sc)
3342{
3343 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3344 struct mrsas_mfi_cmd *cmd;
3345 struct mrsas_dcmd_frame *dcmd;
3346 struct MR_PD_LIST *pd_list_mem;
3347 struct MR_PD_ADDRESS *pd_addr;
3348 bus_addr_t pd_list_phys_addr = 0;
3349 struct mrsas_tmp_dcmd *tcmd;
3350
3351 cmd = mrsas_get_mfi_cmd(sc);
3352 if (!cmd) {
3353 device_printf(sc->mrsas_dev,
3354 "Cannot alloc for get PD list cmd\n");
3355 return 1;
3356 }
3357
3358 dcmd = &cmd->frame->dcmd;
3359
3360 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3361 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3362 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3363 device_printf(sc->mrsas_dev,
3364 "Cannot alloc dmamap for get PD list cmd\n");
3365 mrsas_release_mfi_cmd(cmd);
3366 return(ENOMEM);
3367 }
3368 else {
3369 pd_list_mem = tcmd->tmp_dcmd_mem;
3370 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3371 }
3372 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3373
3374 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3375 dcmd->mbox.b[1] = 0;
3376 dcmd->cmd = MFI_CMD_DCMD;
3377 dcmd->cmd_status = 0xFF;
3378 dcmd->sge_count = 1;
3379 dcmd->flags = MFI_FRAME_DIR_READ;
3380 dcmd->timeout = 0;
3381 dcmd->pad_0 = 0;
3382 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3383 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3384 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3385 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3386
3387 if (!mrsas_issue_polled(sc, cmd))
3388 retcode = 0;
3389 else
3390 retcode = 1;
3391
3392 /* Get the instance PD list */
3393 pd_count = MRSAS_MAX_PD;
3394 pd_addr = pd_list_mem->addr;
3395 if (retcode == 0 && pd_list_mem->count < pd_count) {
3396 memset(sc->local_pd_list, 0,
3397 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3398 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3399 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3400 sc->local_pd_list[pd_addr->deviceId].driveType =
3401 pd_addr->scsiDevType;
3402 sc->local_pd_list[pd_addr->deviceId].driveState =
3403 MR_PD_STATE_SYSTEM;
3404 pd_addr++;
3405 }
3406 }
3407
3408 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3409 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3410 mrsas_free_tmp_dcmd(tcmd);
3411 mrsas_release_mfi_cmd(cmd);
3412 free(tcmd, M_MRSAS);
3413 return(retcode);
3414}
3415
3416/**
3417 * mrsas_get_ld_list: Returns FW's LD list structure
3418 * input: Adapter soft state
3419 *
3420 * Issues an internal command (DCMD) to get the FW's controller PD
3421 * list structure. This information is mainly used to find out about
3422 * supported by the FW.
3423 */
3424static int mrsas_get_ld_list(struct mrsas_softc *sc)
3425{
3426 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3427 struct mrsas_mfi_cmd *cmd;
3428 struct mrsas_dcmd_frame *dcmd;
3429 struct MR_LD_LIST *ld_list_mem;
3430 bus_addr_t ld_list_phys_addr = 0;
3431 struct mrsas_tmp_dcmd *tcmd;
3432
3433 cmd = mrsas_get_mfi_cmd(sc);
3434 if (!cmd) {
3435 device_printf(sc->mrsas_dev,
3436 "Cannot alloc for get LD list cmd\n");
3437 return 1;
3438 }
3439
3440 dcmd = &cmd->frame->dcmd;
3441
3442 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3443 ld_list_size = sizeof(struct MR_LD_LIST);
3444 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3445 device_printf(sc->mrsas_dev,
3446 "Cannot alloc dmamap for get LD list cmd\n");
3447 mrsas_release_mfi_cmd(cmd);
3448 return(ENOMEM);
3449 }
3450 else {
3451 ld_list_mem = tcmd->tmp_dcmd_mem;
3452 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3453 }
3454 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3455
3456 if (sc->max256vdSupport)
3457 dcmd->mbox.b[0]=1;
3458
3459 dcmd->cmd = MFI_CMD_DCMD;
3460 dcmd->cmd_status = 0xFF;
3461 dcmd->sge_count = 1;
3462 dcmd->flags = MFI_FRAME_DIR_READ;
3463 dcmd->timeout = 0;
3464 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3465 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3466 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3467 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3468 dcmd->pad_0 = 0;
3469
3470 if (!mrsas_issue_polled(sc, cmd))
3471 retcode = 0;
3472 else
3473 retcode = 1;
3474
3475#if VD_EXT_DEBUG
3476 printf ("Number of LDs %d\n", ld_list_mem->ldCount);
3477#endif
3478
3479 /* Get the instance LD list */
3480 if ((retcode == 0) &&
3481 (ld_list_mem->ldCount <= sc->fw_supported_vd_count)){
3482 sc->CurLdCount = ld_list_mem->ldCount;
3483 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3484 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3485 if (ld_list_mem->ldList[ld_index].state != 0) {
3486 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3487 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3488 }
3489 }
3490 }
3491
3492 mrsas_free_tmp_dcmd(tcmd);
3493 mrsas_release_mfi_cmd(cmd);
3494 free(tcmd, M_MRSAS);
3495 return(retcode);
3496}
3497
3498/**
3499 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3500 * input: Adapter soft state
3501 * Temp command
3502 * Size of alloction
3503 *
3504 * Allocates DMAable memory for a temporary internal command. The allocated
3505 * memory is initialized to all zeros upon successful loading of the dma
3506 * mapped memory.
3507 */
3508int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3509 int size)
3510{
3511 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3512 1, 0, // algnmnt, boundary
3513 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3514 BUS_SPACE_MAXADDR, // highaddr
3515 NULL, NULL, // filter, filterarg
3516 size, // maxsize
3517 1, // msegments
3518 size, // maxsegsize
3519 BUS_DMA_ALLOCNOW, // flags
3520 NULL, NULL, // lockfunc, lockarg
3521 &tcmd->tmp_dcmd_tag)) {
3522 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3523 return (ENOMEM);
3524 }
3525 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3526 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3527 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3528 return (ENOMEM);
3529 }
3530 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3531 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3532 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3533 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3534 return (ENOMEM);
3535 }
3536
3537 memset(tcmd->tmp_dcmd_mem, 0, size);
3538 return (0);
3539}
3540
3541/**
3542 * mrsas_free_tmp_dcmd: Free memory for temporary command
3543 * input: temporary dcmd pointer
3544 *
3545 * Deallocates memory of the temporary command for use in the construction
3546 * of the internal DCMD.
3547 */
3548void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3549{
3550 if (tmp->tmp_dcmd_phys_addr)
3551 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3552 if (tmp->tmp_dcmd_mem != NULL)
3553 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3554 if (tmp->tmp_dcmd_tag != NULL)
3555 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3556}
3557
3558/**
3559 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3560 * input: Adapter soft state
3561 * Previously issued cmd to be aborted
3562 *
3563 * This function is used to abort previously issued commands, such as AEN and
3564 * RAID map sync map commands. The abort command is sent as a DCMD internal
3565 * command and subsequently the driver will wait for a return status. The
3566 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3567 */
3568static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3569 struct mrsas_mfi_cmd *cmd_to_abort)
3570{
3571 struct mrsas_mfi_cmd *cmd;
3572 struct mrsas_abort_frame *abort_fr;
3573 u_int8_t retcode = 0;
3574 unsigned long total_time = 0;
3575 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3576
3577 cmd = mrsas_get_mfi_cmd(sc);
3578 if (!cmd) {
3579 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3580 return(1);
3581 }
3582
3583 abort_fr = &cmd->frame->abort;
3584
3585 /* Prepare and issue the abort frame */
3586 abort_fr->cmd = MFI_CMD_ABORT;
3587 abort_fr->cmd_status = 0xFF;
3588 abort_fr->flags = 0;
3589 abort_fr->abort_context = cmd_to_abort->index;
3590 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3591 abort_fr->abort_mfi_phys_addr_hi = 0;
3592
3593 cmd->sync_cmd = 1;
3594 cmd->cmd_status = 0xFF;
3595
3596 if (mrsas_issue_dcmd(sc, cmd)) {
3597 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3598 return(1);
3599 }
3600
3601 /* Wait for this cmd to complete */
3602 sc->chan = (void*)&cmd;
3603 while (1) {
3604 if (cmd->cmd_status == 0xFF){
3605 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3606 }
3607 else
3608 break;
3609 total_time++;
3610 if (total_time >= max_wait) {
3611 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3612 retcode = 1;
3613 break;
3614 }
3615 }
3616
3617 cmd->sync_cmd = 0;
3618 mrsas_release_mfi_cmd(cmd);
3619 return(retcode);
3620}
3621
3622/**
3623 * mrsas_complete_abort: Completes aborting a command
3624 * input: Adapter soft state
3625 * Cmd that was issued to abort another cmd
3626 *
3627 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3628 * to change after sending the command. This function is called from
3629 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3630 */
3631void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3632{
3633 if (cmd->sync_cmd) {
3634 cmd->sync_cmd = 0;
3635 cmd->cmd_status = 0;
3636 sc->chan = (void*)&cmd;
3637 wakeup_one((void *)&sc->chan);
3638 }
3639 return;
3640}
3641
3642/**
3643 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3644 * input: Adapter soft state
3645 *
3646 */
3647void mrsas_aen_handler(struct mrsas_softc *sc)
3648{
3649 union mrsas_evt_class_locale class_locale;
3650 int doscan = 0;
3651 u_int32_t seq_num;
3652 int error;
3653
3654 if (!sc) {
3655 device_printf(sc->mrsas_dev, "invalid instance!\n");
3656 return;
3657 }
3658
3659 if (sc->evt_detail_mem) {
3660 switch (sc->evt_detail_mem->code) {
3661 case MR_EVT_PD_INSERTED:
3662 mrsas_get_pd_list(sc);
3663 mrsas_bus_scan_sim(sc, sc->sim_1);
3664 doscan = 0;
3665 break;
3666 case MR_EVT_PD_REMOVED:
3667 mrsas_get_pd_list(sc);
3668 mrsas_bus_scan_sim(sc, sc->sim_1);
3669 doscan = 0;
3670 break;
3671 case MR_EVT_LD_OFFLINE:
3672 case MR_EVT_CFG_CLEARED:
3673 case MR_EVT_LD_DELETED:
3674 mrsas_bus_scan_sim(sc, sc->sim_0);
3675 doscan = 0;
3676 break;
3677 case MR_EVT_LD_CREATED:
3678 mrsas_get_ld_list(sc);
3679 mrsas_bus_scan_sim(sc, sc->sim_0);
3680 doscan = 0;
3681 break;
3682 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3683 case MR_EVT_FOREIGN_CFG_IMPORTED:
3684 case MR_EVT_LD_STATE_CHANGE:
3685 doscan = 1;
3686 break;
3687 default:
3688 doscan = 0;
3689 break;
3690 }
3691 } else {
3692 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3693 return;
3694 }
3695 if (doscan) {
3696 mrsas_get_pd_list(sc);
3697 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3698 mrsas_bus_scan_sim(sc, sc->sim_1);
3699 mrsas_get_ld_list(sc);
3700 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3701 mrsas_bus_scan_sim(sc, sc->sim_0);
3702 }
3703
3704 seq_num = sc->evt_detail_mem->seq_num + 1;
3705
3706 // Register AEN with FW for latest sequence number plus 1
3707 class_locale.members.reserved = 0;
3708 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3709 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3710
3711 if (sc->aen_cmd != NULL )
3712 return ;
3713
3714 mtx_lock(&sc->aen_lock);
3715 error = mrsas_register_aen(sc, seq_num,
3716 class_locale.word);
3717 mtx_unlock(&sc->aen_lock);
3718
3719 if (error)
3720 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3721
3722}
3723
3724
3725/**
3726 * mrsas_complete_aen: Completes AEN command
3727 * input: Adapter soft state
3728 * Cmd that was issued to abort another cmd
3729 *
3730 * This function will be called from ISR and will continue
3731 * event processing from thread context by enqueuing task
3732 * in ev_tq (callback function "mrsas_aen_handler").
3733 */
3734void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3735{
3736 /*
3737 * Don't signal app if it is just an aborted previously registered aen
3738 */
3739 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3740 /* TO DO (?) */
3741 }
3742 else
3743 cmd->abort_aen = 0;
3744
3745 sc->aen_cmd = NULL;
3746 mrsas_release_mfi_cmd(cmd);
3747
3748 if (!sc->remove_in_progress)
3749 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3750
3751 return;
3752}
3753
/* newbus method table: device lifecycle entry points for the mrsas driver */
static device_method_t mrsas_methods[] = {
    DEVMETHOD(device_probe,     mrsas_probe),
    DEVMETHOD(device_attach,    mrsas_attach),
    DEVMETHOD(device_detach,    mrsas_detach),
    DEVMETHOD(device_suspend,   mrsas_suspend),
    DEVMETHOD(device_resume,    mrsas_resume),
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    { 0, 0 }    /* terminator */
};

/* Driver glue: name, method table, and per-instance softc size */
static driver_t mrsas_driver = {
    "mrsas",
    mrsas_methods,
    sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;
/* Attach to the PCI bus; the driver depends on the CAM subsystem */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1,1,1);
3774
1183 }
1184
1185 return (ret);
1186}
1187
1188/**
1189 * mrsas_setup_irq: Set up interrupt.
1190 * input: Adapter instance soft state
1191 *
1192 * This function sets up interrupts as a bus resource, with flags indicating
1193 * resource permitting contemporaneous sharing and for resource to activate
1194 * atomically.
1195 */
1196static int mrsas_setup_irq(struct mrsas_softc *sc)
1197{
1198 sc->irq_id = 0;
1199 sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1200 &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
1201 if (sc->mrsas_irq == NULL){
1202 device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
1203 return (FAIL);
1204 }
1205 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
1206 NULL, mrsas_isr, sc, &sc->intr_handle)) {
1207 device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1208 return (FAIL);
1209 }
1210
1211 return (0);
1212}
1213
1214/*
1215 * mrsas_isr: ISR entry point
1216 * input: argument pointer
1217 *
1218 * This function is the interrupt service routine entry point. There
1219 * are two types of interrupts, state change interrupt and response
1220 * interrupt. If an interrupt is not ours, we just return.
1221 */
1222void mrsas_isr(void *arg)
1223{
1224 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1225 int status;
1226
1227 /* Clear FW state change interrupt */
1228 status = mrsas_clear_intr(sc);
1229
1230 /* Not our interrupt */
1231 if (!status)
1232 return;
1233
1234 /* If we are resetting, bail */
1235 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1236 printf(" Entered into ISR when OCR is going active. \n");
1237 mrsas_clear_intr(sc);
1238 return;
1239 }
1240 /* Process for reply request and clear response interrupt */
1241 if (mrsas_complete_cmd(sc) != SUCCESS)
1242 mrsas_clear_intr(sc);
1243
1244 return;
1245}
1246
/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and
 * clear response interrupt. Processing of the reply request entails
 * walking through the reply descriptor array for the command request
 * pended from Firmware. We look at the Function field to determine
 * the command type and perform the appropriate action. Before we
 * return, we clear the response interrupt.
 *
 * Returns DONE when nothing was completed (or the adapter is in a
 * critical error state), otherwise 0 after writing the updated reply
 * post host index back to the controller.
 */
static int mrsas_complete_cmd(struct mrsas_softc *sc)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t arm, reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Resume the scan where the previous invocation left off. */
	desc = sc->reply_desc_mem;
	desc += sc->last_reply_idx;

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Walk the reply descriptors until an unconsumed slot is reached.
	 * Consumed slots are reset to all-ones (see below), so a slot whose
	 * low and high words are both 0xFFFFFFFF terminates the scan.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
	{
		/* SMIDs are 1-based; index 0 in mpt_cmd_list is SMID 1. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function)
		{
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* Undo the RAID1 load-balance accounting for this arm. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* FALLTHROUGH -- fast-path IO completes like LD IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->reply_q_depth)
			sc->last_reply_idx = 0;

		desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor (ring wraps to the base). */
		if (!sc->last_reply_idx)
			desc = sc->reply_desc_mem;
		else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply count
		 * and still there are more replies in reply queue pending to be
		 * completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
			    sc->last_reply_idx);
			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx);

	return (0);
}
1367
/*
 * mrsas_map_mpt_cmd_status: Map FW completion status to a CAM CCB status.
 * input: Completed MPT command,
 *        firmware status byte,
 *        firmware extended status byte (currently unused here).
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the CCB.
 * (Previous header comment "Allocate DMAable memory" was a copy/paste error.)
 */
void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		/*
		 * NOTE(review): sense_data is the address of an embedded
		 * struct member, so this check is always true.
		 */
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/* send status to CAM layer to retry sending command without
		 * decrementing retry counter */
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1414
/*
 * mrsas_alloc_mem: Allocate DMAable memory.
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory.
 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
 * into Kernel virtual address. Callback argument is physical memory address.
 *
 * Allocations, in order: version buffer, IO request frames, chain frames,
 * reply descriptor array, sense buffers, event-detail buffer, and finally
 * the DMA tag used for mapping data buffers.  Returns 0 on success, ENOMEM
 * on any failure.  NOTE(review): nothing allocated here is released on the
 * error paths -- presumably the caller's teardown handles that; verify
 * against the attach failure path.
 */
static int mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	    chain_frame_size, evt_detail_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,		/* maxsize */
	    MRSAS_MAX_SGL,		/* nsegments */
	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    1, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    verbuf_size, // maxsize
	    1, // msegments
	    verbuf_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}

	/*
	 * Allocate IO Request Frames (sized in mrsas_init_adapter())
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    16, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    io_req_size, // maxsize
	    1, // msegments
	    io_req_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    4, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    chain_frame_size, // maxsize
	    1, // msegments
	    chain_frame_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Reply Descriptor Array.
	 * NOTE(review): unlike the other buffers, this one is not zeroed
	 * here -- the reply-ring protocol expects all-0xFF sentinel words;
	 * presumably they are initialized elsewhere before the queue is
	 * enabled (verify against mrsas_init_fw()/ioc_init path).
	 */
	reply_desc_size = sc->reply_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    16, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    reply_desc_size, // maxsize
	    1, // msegments
	    reply_desc_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    64, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    sense_size, // maxsize
	    1, // nsegments
	    sense_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    1, 0, // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    evt_detail_size, // maxsize
	    1, // msegments
	    evt_detail_size, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    NULL, NULL, // lockfunc, lockarg
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Mappings with this tag are
	 * serialized via sc->io_lock (busdma_lock_mutex).
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    1, // alignment
	    0, // boundary
	    BUS_SPACE_MAXADDR, // lowaddr
	    BUS_SPACE_MAXADDR, // highaddr
	    NULL, NULL, // filter, filterarg
	    MRSAS_MAX_IO_SIZE, // maxsize
	    MRSAS_MAX_SGL, // nsegments
	    MRSAS_MAX_IO_SIZE, // maxsegsize
	    BUS_DMA_ALLOCNOW, // flags
	    busdma_lock_mutex, // lockfunc
	    &sc->io_lock, // lockfuncarg
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}

	return (0);
}
1655
1656/*
1657 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1658 * input: callback argument,
1659 * machine dependent type that describes DMA segments,
1660 * number of segments,
1661 * error code.
1662 *
1663 * This function is for the driver to receive mapping information resultant
1664 * of the bus_dmamap_load(). The information is actually not being used,
1665 * but the address is saved anyway.
1666 */
1667void
1668mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1669{
1670 bus_addr_t *addr;
1671
1672 addr = arg;
1673 *addr = segs[0].ds_addr;
1674}
1675
/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocates the in-kernel driver RAID maps (malloc) and the two DMAable
 * firmware RAID map buffers, then fetches and validates the current map.
 * Returns 0 on success; 1 if the driver-map malloc fails; ENOMEM if any
 * DMA allocation fails.  NOTE(review): on the ENOMEM paths the resources
 * allocated so far are not freed here -- presumably the caller's teardown
 * releases them; verify against the attach failure path.
 */
static int mrsas_setup_raidmap(struct mrsas_softc *sc)
{
	int i;

	sc->drv_supported_vd_count =
	    MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;
	sc->drv_supported_pd_count =
	    MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL;

	/* Firmware limits depend on whether extended (256) VDs are supported. */
	if (sc->max256vdSupport) {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	} else {
		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
	}

#if VD_EXT_DEBUG
	device_printf(sc->mrsas_dev, "FW supports: max256vdSupport = %s\n",
	    sc->max256vdSupport ? "YES":"NO");
	device_printf(sc->mrsas_dev, "FW supports %dVDs %dPDs\n"
	    "DRIVER supports %dVDs %dPDs \n",
	    sc->fw_supported_vd_count, sc->fw_supported_pd_count,
	    sc->drv_supported_vd_count, sc->drv_supported_pd_count);
#endif

	/* Sizes of the legacy, extended and driver-local map layouts. */
	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count-1));

	/* Two driver-local maps: one active, one being updated. */
	for (i = 0; i < 2; i++) {
		sc->ld_drv_map[i] =
		    (void*) malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
		/* Do Error handling */
		if (!sc->ld_drv_map[i]) {
			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");

			if (i == 1)
				free (sc->ld_drv_map[0], M_MRSAS);
			/* Abort driver initialization */
			goto ABORT;
		}
	}

	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);

	if (sc->max256vdSupport)
		sc->current_map_sz = sc->new_map_sz;
	else
		sc->current_map_sz = sc->old_map_sz;

	/* NOTE(review): this loop variable shadows the outer `i`. */
	for (int i = 0; i < 2; i++)
	{
		if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
		    4, 0, // algnmnt, boundary
		    BUS_SPACE_MAXADDR_32BIT, // lowaddr
		    BUS_SPACE_MAXADDR, // highaddr
		    NULL, NULL, // filter, filterarg
		    sc->max_map_sz, // maxsize
		    1, // nsegments
		    sc->max_map_sz, // maxsegsize
		    BUS_DMA_ALLOCNOW, // flags
		    NULL, NULL, // lockfunc, lockarg
		    &sc->raidmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raid map tag.\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->raidmap_tag[i],
		    (void **)&sc->raidmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate raidmap memory.\n");
			return (ENOMEM);
		}

		bzero (sc->raidmap_mem[i], sc->max_map_sz);

		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
		    sc->raidmap_mem[i], sc->max_map_sz,
		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
		    BUS_DMA_NOWAIT)){
			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
			return (ENOMEM);
		}
		/*
		 * NOTE(review): dead check -- raidmap_mem[i] was already
		 * verified non-NULL after bus_dmamem_alloc() above.
		 */
		if (!sc->raidmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for raid map.\n");
			return (ENOMEM);
		}
	}

	/* Fetch the current map from FW and validate it. */
	if (!mrsas_get_map_info(sc))
		mrsas_sync_map_info(sc);

	return (0);

ABORT:
	return (1);
}
1785
1786/**
1787 * mrsas_init_fw: Initialize Firmware
1788 * input: Adapter soft state
1789 *
1790 * Calls transition_to_ready() to make sure Firmware is in operational
1791 * state and calls mrsas_init_adapter() to send IOC_INIT command to
1792 * Firmware. It issues internal commands to get the controller info
1793 * after the IOC_INIT command response is received by Firmware.
1794 * Note: code relating to get_pdlist, get_ld_list and max_sectors
1795 * are currently not being used, it is left here as placeholder.
1796 */
1797static int mrsas_init_fw(struct mrsas_softc *sc)
1798{
1799 u_int32_t max_sectors_1;
1800 u_int32_t max_sectors_2;
1801 u_int32_t tmp_sectors;
1802 struct mrsas_ctrl_info *ctrl_info;
1803
1804 int ret, ocr = 0;
1805
1806
1807 /* Make sure Firmware is ready */
1808 ret = mrsas_transition_to_ready(sc, ocr);
1809 if (ret != SUCCESS) {
1810 return(ret);
1811 }
1812
1813 /* Get operational params, sge flags, send init cmd to ctlr */
1814 if (mrsas_init_adapter(sc) != SUCCESS){
1815 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
1816 return(1);
1817 }
1818
1819 /* Allocate internal commands for pass-thru */
1820 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1821 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1822 return(1);
1823 }
1824
1825 /*
1826 * Get the controller info from FW, so that
1827 * the MAX VD support availability can be decided.
1828 */
1829 ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1830 if (!ctrl_info)
1831 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
1832
1833 if (mrsas_get_ctrl_info(sc, ctrl_info)) {
1834 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
1835 }
1836
1837 sc->max256vdSupport =
1838 (u_int8_t) ctrl_info->adapterOperations3.supportMaxExtLDs;
1839
1840 if (ctrl_info->max_lds > 64){
1841 sc->max256vdSupport = 1;
1842 }
1843
1844 if (mrsas_setup_raidmap(sc) != SUCCESS) {
1845 device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1846 return(1);
1847 }
1848
1849 /* For pass-thru, get PD/LD list and controller info */
1850 memset(sc->pd_list, 0,
1851 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1852 mrsas_get_pd_list(sc);
1853
1854 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
1855 mrsas_get_ld_list(sc);
1856
1857 /*
1858 * Compute the max allowed sectors per IO: The controller info has two
1859 * limits on max sectors. Driver should use the minimum of these two.
1860 *
1861 * 1 << stripe_sz_ops.min = max sectors per strip
1862 *
1863 * Note that older firmwares ( < FW ver 30) didn't report information
1864 * to calculate max_sectors_1. So the number ended up as zero always.
1865 */
1866 tmp_sectors = 0;
1867 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1868 ctrl_info->max_strips_per_io;
1869 max_sectors_2 = ctrl_info->max_request_size;
1870 tmp_sectors = min(max_sectors_1 , max_sectors_2);
1871 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1872
1873 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1874 sc->max_sectors_per_req = tmp_sectors;
1875
1876 sc->disableOnlineCtrlReset =
1877 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1878 sc->UnevenSpanSupport =
1879 ctrl_info->adapterOperations2.supportUnevenSpans;
1880 if(sc->UnevenSpanSupport) {
1881 printf("FW supports: UnevenSpanSupport=%x\n\n",
1882 sc->UnevenSpanSupport);
1883
1884 if (MR_ValidateMapInfo(sc))
1885 sc->fast_path_io = 1;
1886 else
1887 sc->fast_path_io = 0;
1888 }
1889
1890 if (ctrl_info)
1891 free(ctrl_info, M_MRSAS);
1892
1893 return(0);
1894}
1895
1896/**
1897 * mrsas_init_adapter: Initializes the adapter/controller
1898 * input: Adapter soft state
1899 *
1900 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
1901 * ROC/controller. The FW register is read to determined the number of
1902 * commands that is supported. All memory allocations for IO is based on
1903 * max_cmd. Appropriate calculations are performed in this function.
1904 */
1905int mrsas_init_adapter(struct mrsas_softc *sc)
1906{
1907 uint32_t status;
1908 u_int32_t max_cmd;
1909 int ret;
1910
1911 /* Read FW status register */
1912 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1913
1914 /* Get operational params from status register */
1915 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1916
1917 /* Decrement the max supported by 1, to correlate with FW */
1918 sc->max_fw_cmds = sc->max_fw_cmds-1;
1919 max_cmd = sc->max_fw_cmds;
1920
1921 /* Determine allocation size of command frames */
1922 sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1923 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1924 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
1925 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1926 sc->chain_frames_alloc_sz = 1024 * max_cmd;
1927 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1928 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1929
1930 sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1931 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1932
1933 /* Used for pass thru MFI frame (DCMD) */
1934 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1935
1936 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1937 sizeof(MPI2_SGE_IO_UNION))/16;
1938
1939 sc->last_reply_idx = 0;
1940
1941 ret = mrsas_alloc_mem(sc);
1942 if (ret != SUCCESS)
1943 return(ret);
1944
1945 ret = mrsas_alloc_mpt_cmds(sc);
1946 if (ret != SUCCESS)
1947 return(ret);
1948
1949 ret = mrsas_ioc_init(sc);
1950 if (ret != SUCCESS)
1951 return(ret);
1952
1953
1954 return(0);
1955}
1956
1957/**
1958 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
1959 * input: Adapter soft state
1960 *
1961 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
1962 */
1963int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1964{
1965 int ioc_init_size;
1966
1967 /* Allocate IOC INIT command */
1968 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
1969 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
1970 1, 0, // algnmnt, boundary
1971 BUS_SPACE_MAXADDR_32BIT,// lowaddr
1972 BUS_SPACE_MAXADDR, // highaddr
1973 NULL, NULL, // filter, filterarg
1974 ioc_init_size, // maxsize
1975 1, // msegments
1976 ioc_init_size, // maxsegsize
1977 BUS_DMA_ALLOCNOW, // flags
1978 NULL, NULL, // lockfunc, lockarg
1979 &sc->ioc_init_tag)) {
1980 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1981 return (ENOMEM);
1982 }
1983 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1984 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1985 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1986 return (ENOMEM);
1987 }
1988 bzero(sc->ioc_init_mem, ioc_init_size);
1989 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1990 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1991 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1992 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1993 return (ENOMEM);
1994 }
1995
1996 return (0);
1997}
1998
/**
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Unloads, frees and destroys the DMA resources set up for the IOC Init
 * command by mrsas_alloc_ioc_cmd().  Safe to call after a partial
 * allocation failure.  (Previous header comment said "Allocates" --
 * copy/paste error.)
 */
void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/* A non-zero physical address means the map is currently loaded. */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}
2014
/**
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller: builds
 * the MPI2 IOC INIT message and the wrapping MFI init frame in the DMA
 * buffer from mrsas_alloc_ioc_cmd(), fires it, then polls cmd_status for
 * up to MRSAS_IOC_INIT_WAIT_TIME seconds.  Returns 0 on success, 1 on
 * timeout or FW-reported failure.
 */
int mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return(1);
	}

	/* The MPI2 IOC INIT message lives 1KB into the allocation. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

	/* The MFI frame that wraps the IOC INIT message starts at offset 0. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* sentinel: FW overwrites on completion */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Advertise the driver version string to FW, if the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}

	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i=0; i < (max_wait * 1000); i++){
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}

	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else
	{
		/* 0xFF means the sentinel was never overwritten: timeout. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2102
2103/**
2104 * mrsas_alloc_mpt_cmds: Allocates the command packets
2105 * input: Adapter instance soft state
2106 *
2107 * This function allocates the internal commands for IOs. Each command that is
2108 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
2109 * An array is allocated with mrsas_mpt_cmd context. The free commands are
2110 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2111 * max_fw_cmds.
2112 */
2113int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2114{
2115 int i, j;
2116 u_int32_t max_cmd;
2117 struct mrsas_mpt_cmd *cmd;
2118 pMpi2ReplyDescriptorsUnion_t reply_desc;
2119 u_int32_t offset, chain_offset, sense_offset;
2120 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2121 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2122
2123 max_cmd = sc->max_fw_cmds;
2124
2125 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2126 if (!sc->req_desc) {
2127 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2128 return(ENOMEM);
2129 }
2130 memset(sc->req_desc, 0, sc->request_alloc_sz);
2131
2132 /*
2133 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2134 * dynamic array first and then allocate individual commands.
2135 */
2136 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2137 if (!sc->mpt_cmd_list) {
2138 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2139 return(ENOMEM);
2140 }
2141 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2142 for (i = 0; i < max_cmd; i++) {
2143 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2144 M_MRSAS, M_NOWAIT);
2145 if (!sc->mpt_cmd_list[i]) {
2146 for (j = 0; j < i; j++)
2147 free(sc->mpt_cmd_list[j],M_MRSAS);
2148 free(sc->mpt_cmd_list, M_MRSAS);
2149 sc->mpt_cmd_list = NULL;
2150 return(ENOMEM);
2151 }
2152 }
2153
2154 io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2155 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2156 chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2157 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2158 sense_base = (u_int8_t*)sc->sense_mem;
2159 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2160 for (i = 0; i < max_cmd; i++) {
2161 cmd = sc->mpt_cmd_list[i];
2162 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2163 chain_offset = 1024 * i;
2164 sense_offset = MRSAS_SENSE_LEN * i;
2165 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2166 cmd->index = i + 1;
2167 cmd->ccb_ptr = NULL;
2168 callout_init(&cmd->cm_callout, 0);
2169 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2170 cmd->sc = sc;
2171 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2172 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2173 cmd->io_request_phys_addr = io_req_base_phys + offset;
2174 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2175 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2176 cmd->sense = sense_base + sense_offset;
2177 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2178 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2179 return(FAIL);
2180 }
2181 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2182 }
2183
2184 /* Initialize reply descriptor array to 0xFFFFFFFF */
2185 reply_desc = sc->reply_desc_mem;
2186 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2187 reply_desc->Words = MRSAS_ULONG_MAX;
2188 }
2189 return(0);
2190}
2191
2192/**
2193 * mrsas_fire_cmd: Sends command to FW
2194 * input: Adapter soft state
2195 * request descriptor address low
2196 * request descriptor address high
2197 *
2198 * This functions fires the command to Firmware by writing to the
2199 * inbound_low_queue_port and inbound_high_queue_port.
2200 */
2201void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2202 u_int32_t req_desc_hi)
2203{
2204 mtx_lock(&sc->pci_lock);
2205 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2206 req_desc_lo);
2207 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2208 req_desc_hi);
2209 mtx_unlock(&sc->pci_lock);
2210}
2211
2212/**
2213 * mrsas_transition_to_ready: Move FW to Ready state
2214 * input: Adapter instance soft state
2215 *
2216 * During the initialization, FW passes can potentially be in any one of
2217 * several possible states. If the FW in operational, waiting-for-handshake
2218 * states, driver must take steps to bring it to ready state. Otherwise, it
2219 * has to wait for the ready state.
2220 */
2221int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2222{
2223 int i;
2224 u_int8_t max_wait;
2225 u_int32_t val, fw_state;
2226 u_int32_t cur_state;
2227 u_int32_t abs_state, curr_abs_state;
2228
2229 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2230 fw_state = val & MFI_STATE_MASK;
2231 max_wait = MRSAS_RESET_WAIT_TIME;
2232
2233 if (fw_state != MFI_STATE_READY)
2234 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2235
2236 while (fw_state != MFI_STATE_READY) {
2237 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2238 switch (fw_state) {
2239 case MFI_STATE_FAULT:
2240 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2241 if (ocr) {
2242 cur_state = MFI_STATE_FAULT;
2243 break;
2244 }
2245 else
2246 return -ENODEV;
2247 case MFI_STATE_WAIT_HANDSHAKE:
2248 /* Set the CLR bit in inbound doorbell */
2249 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2250 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2251 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2252 break;
2253 case MFI_STATE_BOOT_MESSAGE_PENDING:
2254 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2255 MFI_INIT_HOTPLUG);
2256 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2257 break;
2258 case MFI_STATE_OPERATIONAL:
2259 /* Bring it to READY state; assuming max wait 10 secs */
2260 mrsas_disable_intr(sc);
2261 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2262 for (i=0; i < max_wait * 1000; i++) {
2263 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2264 DELAY(1000);
2265 else
2266 break;
2267 }
2268 cur_state = MFI_STATE_OPERATIONAL;
2269 break;
2270 case MFI_STATE_UNDEFINED:
2271 /* This state should not last for more than 2 seconds */
2272 cur_state = MFI_STATE_UNDEFINED;
2273 break;
2274 case MFI_STATE_BB_INIT:
2275 cur_state = MFI_STATE_BB_INIT;
2276 break;
2277 case MFI_STATE_FW_INIT:
2278 cur_state = MFI_STATE_FW_INIT;
2279 break;
2280 case MFI_STATE_FW_INIT_2:
2281 cur_state = MFI_STATE_FW_INIT_2;
2282 break;
2283 case MFI_STATE_DEVICE_SCAN:
2284 cur_state = MFI_STATE_DEVICE_SCAN;
2285 break;
2286 case MFI_STATE_FLUSH_CACHE:
2287 cur_state = MFI_STATE_FLUSH_CACHE;
2288 break;
2289 default:
2290 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2291 return -ENODEV;
2292 }
2293
2294 /*
2295 * The cur_state should not last for more than max_wait secs
2296 */
2297 for (i = 0; i < (max_wait * 1000); i++) {
2298 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2299 outbound_scratch_pad))& MFI_STATE_MASK);
2300 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2301 outbound_scratch_pad));
2302 if (abs_state == curr_abs_state)
2303 DELAY(1000);
2304 else
2305 break;
2306 }
2307
2308 /*
2309 * Return error if fw_state hasn't changed after max_wait
2310 */
2311 if (curr_abs_state == abs_state) {
2312 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2313 "in %d secs\n", fw_state, max_wait);
2314 return -ENODEV;
2315 }
2316 }
2317 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2318 //device_printf(sc->mrsas_dev, "FW now in Ready state\n");del?
2319 return 0;
2320}
2321
2322/**
2323 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2324 * input: Adapter soft state
2325 *
2326 * This function removes an MFI command from the command list.
2327 */
2328struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2329{
2330 struct mrsas_mfi_cmd *cmd = NULL;
2331
2332 mtx_lock(&sc->mfi_cmd_pool_lock);
2333 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2334 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2335 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2336 }
2337 mtx_unlock(&sc->mfi_cmd_pool_lock);
2338
2339 return cmd;
2340}
2341
2342/**
2343 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2344 * input: Adapter Context.
2345 *
2346 * This function will check FW status register and flag
2347 * do_timeout_reset flag. It will do OCR/Kill adapter if
2348 * FW is in fault state or IO timed out has trigger reset.
2349 */
2350static void
2351mrsas_ocr_thread(void *arg)
2352{
2353 struct mrsas_softc *sc;
2354 u_int32_t fw_status, fw_state;
2355
2356 sc = (struct mrsas_softc *)arg;
2357
2358 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2359
2360 sc->ocr_thread_active = 1;
2361 mtx_lock(&sc->sim_lock);
2362 for (;;) {
2363 /* Sleep for 1 second and check the queue status*/
2364 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2365 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2366 if (sc->remove_in_progress) {
2367 mrsas_dprint(sc, MRSAS_OCR,
2368 "Exit due to shutdown from %s\n", __func__);
2369 break;
2370 }
2371 fw_status = mrsas_read_reg(sc,
2372 offsetof(mrsas_reg_set, outbound_scratch_pad));
2373 fw_state = fw_status & MFI_STATE_MASK;
2374 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2375 device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2376 sc->do_timedout_reset?"IO Timeout":
2377 "FW fault detected");
2378 mtx_lock_spin(&sc->ioctl_lock);
2379 sc->reset_in_progress = 1;
2380 sc->reset_count++;
2381 mtx_unlock_spin(&sc->ioctl_lock);
2382 mrsas_xpt_freeze(sc);
2383 mrsas_reset_ctrl(sc);
2384 mrsas_xpt_release(sc);
2385 sc->reset_in_progress = 0;
2386 sc->do_timedout_reset = 0;
2387 }
2388 }
2389 mtx_unlock(&sc->sim_lock);
2390 sc->ocr_thread_active = 0;
2391 mrsas_kproc_exit(0);
2392}
2393
2394/**
2395 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2396 * input: Adapter Context.
2397 *
2398 * This function will clear reply descriptor so that post OCR
2399 * driver and FW will lost old history.
2400 */
2401void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2402{
2403 int i;
2404 pMpi2ReplyDescriptorsUnion_t reply_desc;
2405
2406 sc->last_reply_idx = 0;
2407 reply_desc = sc->reply_desc_mem;
2408 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2409 reply_desc->Words = MRSAS_ULONG_MAX;
2410 }
2411}
2412
2413/**
2414 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2415 * input: Adapter Context.
2416 *
2417 * This function will run from thread context so that it can sleep.
2418 * 1. Do not handle OCR if FW is in HW critical error.
2419 * 2. Wait for outstanding command to complete for 180 seconds.
2420 * 3. If #2 does not find any outstanding command Controller is in working
2421 * state, so skip OCR.
2422 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2423 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2424 * ccb_ptr.
2425 * 5. Post OCR, Re-fire Managment command and move Controller to Operation
2426 * state.
2427 */
2428int mrsas_reset_ctrl(struct mrsas_softc *sc)
2429{
2430 int retval = SUCCESS, i, j, retry = 0;
2431 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2432 union ccb *ccb;
2433 struct mrsas_mfi_cmd *mfi_cmd;
2434 struct mrsas_mpt_cmd *mpt_cmd;
2435 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2436
2437 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2438 device_printf(sc->mrsas_dev,
2439 "mrsas: Hardware critical error, returning FAIL.\n");
2440 return FAIL;
2441 }
2442
2443 set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2444 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2445 mrsas_disable_intr(sc);
2446 DELAY(1000 * 1000);
2447
2448 /* First try waiting for commands to complete */
2449 if (mrsas_wait_for_outstanding(sc)) {
2450 mrsas_dprint(sc, MRSAS_OCR,
2451 "resetting adapter from %s.\n",
2452 __func__);
2453 /* Now return commands back to the CAM layer */
2454 for (i = 0 ; i < sc->max_fw_cmds; i++) {
2455 mpt_cmd = sc->mpt_cmd_list[i];
2456 if (mpt_cmd->ccb_ptr) {
2457 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2458 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2459 mrsas_cmd_done(sc, mpt_cmd);
2460 atomic_dec(&sc->fw_outstanding);
2461 }
2462 }
2463
2464 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2465 outbound_scratch_pad));
2466 abs_state = status_reg & MFI_STATE_MASK;
2467 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2468 if (sc->disableOnlineCtrlReset ||
2469 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2470 /* Reset not supported, kill adapter */
2471 mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2472 mrsas_kill_hba(sc);
2473 sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2474 retval = FAIL;
2475 goto out;
2476 }
2477
2478 /* Now try to reset the chip */
2479 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2480 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2481 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2482 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2483 MPI2_WRSEQ_1ST_KEY_VALUE);
2484 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2485 MPI2_WRSEQ_2ND_KEY_VALUE);
2486 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2487 MPI2_WRSEQ_3RD_KEY_VALUE);
2488 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2489 MPI2_WRSEQ_4TH_KEY_VALUE);
2490 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2491 MPI2_WRSEQ_5TH_KEY_VALUE);
2492 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2493 MPI2_WRSEQ_6TH_KEY_VALUE);
2494
2495 /* Check that the diag write enable (DRWE) bit is on */
2496 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2497 fusion_host_diag));
2498 retry = 0;
2499 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2500 DELAY(100 * 1000);
2501 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2502 fusion_host_diag));
2503 if (retry++ == 100) {
2504 mrsas_dprint(sc, MRSAS_OCR,
2505 "Host diag unlock failed!\n");
2506 break;
2507 }
2508 }
2509 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2510 continue;
2511
2512 /* Send chip reset command */
2513 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2514 host_diag | HOST_DIAG_RESET_ADAPTER);
2515 DELAY(3000 * 1000);
2516
2517 /* Make sure reset adapter bit is cleared */
2518 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2519 fusion_host_diag));
2520 retry = 0;
2521 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2522 DELAY(100 * 1000);
2523 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2524 fusion_host_diag));
2525 if (retry++ == 1000) {
2526 mrsas_dprint(sc, MRSAS_OCR,
2527 "Diag reset adapter never cleared!\n");
2528 break;
2529 }
2530 }
2531 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2532 continue;
2533
2534 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2535 outbound_scratch_pad)) & MFI_STATE_MASK;
2536 retry = 0;
2537
2538 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2539 DELAY(100 * 1000);
2540 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2541 outbound_scratch_pad)) & MFI_STATE_MASK;
2542 }
2543 if (abs_state <= MFI_STATE_FW_INIT) {
2544 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2545 " state = 0x%x\n", abs_state);
2546 continue;
2547 }
2548
2549 /* Wait for FW to become ready */
2550 if (mrsas_transition_to_ready(sc, 1)) {
2551 mrsas_dprint(sc, MRSAS_OCR,
2552 "mrsas: Failed to transition controller to ready.\n");
2553 continue;
2554 }
2555
2556 mrsas_reset_reply_desc(sc);
2557 if (mrsas_ioc_init(sc)) {
2558 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2559 continue;
2560 }
2561
2562 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2563 mrsas_enable_intr(sc);
2564 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2565
2566 /* Re-fire management commands */
2567 for (j = 0 ; j < sc->max_fw_cmds; j++) {
2568 mpt_cmd = sc->mpt_cmd_list[j];
2569 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2570 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2571 if (mfi_cmd->frame->dcmd.opcode ==
2572 MR_DCMD_LD_MAP_GET_INFO) {
2573 mrsas_release_mfi_cmd(mfi_cmd);
2574 mrsas_release_mpt_cmd(mpt_cmd);
2575 } else {
2576 req_desc = mrsas_get_request_desc(sc,
2577 mfi_cmd->cmd_id.context.smid - 1);
2578 mrsas_dprint(sc, MRSAS_OCR,
2579 "Re-fire command DCMD opcode 0x%x index %d\n ",
2580 mfi_cmd->frame->dcmd.opcode, j);
2581 if (!req_desc)
2582 device_printf(sc->mrsas_dev,
2583 "Cannot build MPT cmd.\n");
2584 else
2585 mrsas_fire_cmd(sc, req_desc->addr.u.low,
2586 req_desc->addr.u.high);
2587 }
2588 }
2589 }
2590
2591 /* Reset load balance info */
2592 memset(sc->load_balance_info, 0,
2593 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
2594
2595 if (!mrsas_get_map_info(sc))
2596 mrsas_sync_map_info(sc);
2597
2598 /* Adapter reset completed successfully */
2599 device_printf(sc->mrsas_dev, "Reset successful\n");
2600 retval = SUCCESS;
2601 goto out;
2602 }
2603 /* Reset failed, kill the adapter */
2604 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2605 mrsas_kill_hba(sc);
2606 retval = FAIL;
2607 } else {
2608 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2609 mrsas_enable_intr(sc);
2610 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2611 }
2612out:
2613 clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2614 mrsas_dprint(sc, MRSAS_OCR,
2615 "Reset Exit with %d.\n", retval);
2616 return retval;
2617}
2618
2619/**
2620 * mrsas_kill_hba Kill HBA when OCR is not supported.
2621 * input: Adapter Context.
2622 *
2623 * This function will kill HBA when OCR is not supported.
2624 */
2625void mrsas_kill_hba (struct mrsas_softc *sc)
2626{
2627 mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2628 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2629 MFI_STOP_ADP);
2630 /* Flush */
2631 mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2632}
2633
2634/**
2635 * mrsas_wait_for_outstanding Wait for outstanding commands
2636 * input: Adapter Context.
2637 *
2638 * This function will wait for 180 seconds for outstanding
2639 * commands to be completed.
2640 */
2641int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2642{
2643 int i, outstanding, retval = 0;
2644 u_int32_t fw_state;
2645
2646 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2647 if (sc->remove_in_progress) {
2648 mrsas_dprint(sc, MRSAS_OCR,
2649 "Driver remove or shutdown called.\n");
2650 retval = 1;
2651 goto out;
2652 }
2653 /* Check if firmware is in fault state */
2654 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2655 outbound_scratch_pad)) & MFI_STATE_MASK;
2656 if (fw_state == MFI_STATE_FAULT) {
2657 mrsas_dprint(sc, MRSAS_OCR,
2658 "Found FW in FAULT state, will reset adapter.\n");
2659 retval = 1;
2660 goto out;
2661 }
2662 outstanding = atomic_read(&sc->fw_outstanding);
2663 if (!outstanding)
2664 goto out;
2665
2666 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2667 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2668 "commands to complete\n",i,outstanding);
2669 mrsas_complete_cmd(sc);
2670 }
2671 DELAY(1000 * 1000);
2672 }
2673
2674 if (atomic_read(&sc->fw_outstanding)) {
2675 mrsas_dprint(sc, MRSAS_OCR,
2676 " pending commands remain after waiting,"
2677 " will reset adapter.\n");
2678 retval = 1;
2679 }
2680out:
2681 return retval;
2682}
2683
2684/**
2685 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2686 * input: Command packet for return to free cmd pool
2687 *
2688 * This function returns the MFI command to the command list.
2689 */
2690void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2691{
2692 struct mrsas_softc *sc = cmd->sc;
2693
2694 mtx_lock(&sc->mfi_cmd_pool_lock);
2695 cmd->ccb_ptr = NULL;
2696 cmd->cmd_id.frame_count = 0;
2697 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2698 mtx_unlock(&sc->mfi_cmd_pool_lock);
2699
2700 return;
2701}
2702
2703/**
2704 * mrsas_get_controller_info - Returns FW's controller structure
2705 * input: Adapter soft state
2706 * Controller information structure
2707 *
2708 * Issues an internal command (DCMD) to get the FW's controller structure.
2709 * This information is mainly used to find out the maximum IO transfer per
2710 * command supported by the FW.
2711 */
2712static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2713 struct mrsas_ctrl_info *ctrl_info)
2714{
2715 int retcode = 0;
2716 struct mrsas_mfi_cmd *cmd;
2717 struct mrsas_dcmd_frame *dcmd;
2718
2719 cmd = mrsas_get_mfi_cmd(sc);
2720
2721 if (!cmd) {
2722 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2723 return -ENOMEM;
2724 }
2725 dcmd = &cmd->frame->dcmd;
2726
2727 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2728 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2729 mrsas_release_mfi_cmd(cmd);
2730 return -ENOMEM;
2731 }
2732 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2733
2734 dcmd->cmd = MFI_CMD_DCMD;
2735 dcmd->cmd_status = 0xFF;
2736 dcmd->sge_count = 1;
2737 dcmd->flags = MFI_FRAME_DIR_READ;
2738 dcmd->timeout = 0;
2739 dcmd->pad_0 = 0;
2740 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2741 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2742 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2743 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2744
2745 if (!mrsas_issue_polled(sc, cmd))
2746 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2747 else
2748 retcode = 1;
2749
2750 mrsas_free_ctlr_info_cmd(sc);
2751 mrsas_release_mfi_cmd(cmd);
2752 return(retcode);
2753}
2754
2755/**
2756 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2757 * input: Adapter soft state
2758 *
2759 * Allocates DMAable memory for the controller info internal command.
2760 */
2761int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2762{
2763 int ctlr_info_size;
2764
2765 /* Allocate get controller info command */
2766 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2767 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2768 1, 0, // algnmnt, boundary
2769 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2770 BUS_SPACE_MAXADDR, // highaddr
2771 NULL, NULL, // filter, filterarg
2772 ctlr_info_size, // maxsize
2773 1, // msegments
2774 ctlr_info_size, // maxsegsize
2775 BUS_DMA_ALLOCNOW, // flags
2776 NULL, NULL, // lockfunc, lockarg
2777 &sc->ctlr_info_tag)) {
2778 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2779 return (ENOMEM);
2780 }
2781 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2782 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2783 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2784 return (ENOMEM);
2785 }
2786 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2787 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2788 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2789 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2790 return (ENOMEM);
2791 }
2792
2793 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2794 return (0);
2795}
2796
2797/**
2798 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2799 * input: Adapter soft state
2800 *
2801 * Deallocates memory of the get controller info cmd.
2802 */
2803void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2804{
2805 if (sc->ctlr_info_phys_addr)
2806 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2807 if (sc->ctlr_info_mem != NULL)
2808 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2809 if (sc->ctlr_info_tag != NULL)
2810 bus_dma_tag_destroy(sc->ctlr_info_tag);
2811}
2812
2813/**
2814 * mrsas_issue_polled: Issues a polling command
2815 * inputs: Adapter soft state
2816 * Command packet to be issued
2817 *
2818 * This function is for posting of internal commands to Firmware. MFI
2819 * requires the cmd_status to be set to 0xFF before posting. The maximun
2820 * wait time of the poll response timer is 180 seconds.
2821 */
2822int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2823{
2824 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2825 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2826 int i, retcode = 0;
2827
2828 frame_hdr->cmd_status = 0xFF;
2829 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2830
2831 /* Issue the frame using inbound queue port */
2832 if (mrsas_issue_dcmd(sc, cmd)) {
2833 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2834 return(1);
2835 }
2836
2837 /*
2838 * Poll response timer to wait for Firmware response. While this
2839 * timer with the DELAY call could block CPU, the time interval for
2840 * this is only 1 millisecond.
2841 */
2842 if (frame_hdr->cmd_status == 0xFF) {
2843 for (i=0; i < (max_wait * 1000); i++){
2844 if (frame_hdr->cmd_status == 0xFF)
2845 DELAY(1000);
2846 else
2847 break;
2848 }
2849 }
2850 if (frame_hdr->cmd_status != 0)
2851 {
2852 if (frame_hdr->cmd_status == 0xFF)
2853 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2854 else
2855 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2856 retcode = 1;
2857 }
2858 return(retcode);
2859}
2860
2861/**
2862 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2863 * input: Adapter soft state
2864 * mfi cmd pointer
2865 *
2866 * This function is called by mrsas_issued_blocked_cmd() and
2867 * mrsas_issued_polled(), to build the MPT command and then fire the
2868 * command to Firmware.
2869 */
2870int
2871mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2872{
2873 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2874
2875 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2876 if (!req_desc) {
2877 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2878 return(1);
2879 }
2880
2881 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2882
2883 return(0);
2884}
2885
2886/**
2887 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2888 * input: Adapter soft state
2889 * mfi cmd to build
2890 *
2891 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2892 * passthru command and prepares the MPT command to send to Firmware.
2893 */
2894MRSAS_REQUEST_DESCRIPTOR_UNION *
2895mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2896{
2897 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2898 u_int16_t index;
2899
2900 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2901 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2902 return NULL;
2903 }
2904
2905 index = cmd->cmd_id.context.smid;
2906
2907 req_desc = mrsas_get_request_desc(sc, index-1);
2908 if(!req_desc)
2909 return NULL;
2910
2911 req_desc->addr.Words = 0;
2912 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2913
2914 req_desc->SCSIIO.SMID = index;
2915
2916 return(req_desc);
2917}
2918
2919/**
2920 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2921 * input: Adapter soft state
2922 * mfi cmd pointer
2923 *
2924 * The MPT command and the io_request are setup as a passthru command.
2925 * The SGE chain address is set to frame_phys_addr of the MFI command.
2926 */
2927u_int8_t
2928mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2929{
2930 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2931 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2932 struct mrsas_mpt_cmd *mpt_cmd;
2933 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2934
2935 mpt_cmd = mrsas_get_mpt_cmd(sc);
2936 if (!mpt_cmd)
2937 return(1);
2938
2939 /* Save the smid. To be used for returning the cmd */
2940 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2941
2942 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2943
2944 /*
2945 * For cmds where the flag is set, store the flag and check
2946 * on completion. For cmds with this flag, don't call
2947 * mrsas_complete_cmd.
2948 */
2949
2950 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2951 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2952
2953 io_req = mpt_cmd->io_request;
2954
2955 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2956 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2957 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2958 sgl_ptr_end->Flags = 0;
2959 }
2960
2961 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2962
2963 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2964 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2965 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
2966
2967 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2968
2969 mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2970 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2971
2972 mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2973
2974 return(0);
2975}
2976
2977/**
2978 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2979 * input: Adapter soft state
2980 * Command to be issued
2981 *
2982 * This function waits on an event for the command to be returned
2983 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2984 * Used for issuing internal and ioctl commands.
2985 */
2986int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2987{
2988 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2989 unsigned long total_time = 0;
2990 int retcode = 0;
2991
2992 /* Initialize cmd_status */
2993 cmd->cmd_status = ECONNREFUSED;
2994
2995 /* Build MPT-MFI command for issue to FW */
2996 if (mrsas_issue_dcmd(sc, cmd)){
2997 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2998 return(1);
2999 }
3000
3001 sc->chan = (void*)&cmd;
3002
3003 /* The following is for debug only... */
3004 //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
3005 //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
3006
3007 while (1) {
3008 if (cmd->cmd_status == ECONNREFUSED){
3009 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3010 }
3011 else
3012 break;
3013 total_time++;
3014 if (total_time >= max_wait) {
3015 device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
3016 retcode = 1;
3017 break;
3018 }
3019 }
3020 return(retcode);
3021}
3022
3023/**
3024 * mrsas_complete_mptmfi_passthru - Completes a command
3025 * input: sc: Adapter soft state
3026 * cmd: Command to be completed
3027 * status: cmd completion status
3028 *
3029 * This function is called from mrsas_complete_cmd() after an interrupt
3030 * is received from Firmware, and io_request->Function is
3031 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3032 */
3033void
3034mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
3035 u_int8_t status)
3036{
3037 struct mrsas_header *hdr = &cmd->frame->hdr;
3038 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
3039
3040 /* Reset the retry counter for future re-tries */
3041 cmd->retry_for_fw_reset = 0;
3042
3043 if (cmd->ccb_ptr)
3044 cmd->ccb_ptr = NULL;
3045
3046 switch (hdr->cmd) {
3047 case MFI_CMD_INVALID:
3048 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
3049 break;
3050 case MFI_CMD_PD_SCSI_IO:
3051 case MFI_CMD_LD_SCSI_IO:
3052 /*
3053 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3054 * issued either through an IO path or an IOCTL path. If it
3055 * was via IOCTL, we will send it to internal completion.
3056 */
3057 if (cmd->sync_cmd) {
3058 cmd->sync_cmd = 0;
3059 mrsas_wakeup(sc, cmd);
3060 break;
3061 }
3062 case MFI_CMD_SMP:
3063 case MFI_CMD_STP:
3064 case MFI_CMD_DCMD:
3065 /* Check for LD map update */
3066 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
3067 (cmd->frame->dcmd.mbox.b[1] == 1)) {
3068 sc->fast_path_io = 0;
3069 mtx_lock(&sc->raidmap_lock);
3070 if (cmd_status != 0) {
3071 if (cmd_status != MFI_STAT_NOT_FOUND)
3072 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
3073 else {
3074 mrsas_release_mfi_cmd(cmd);
3075 mtx_unlock(&sc->raidmap_lock);
3076 break;
3077 }
3078 }
3079 else
3080 sc->map_id++;
3081 mrsas_release_mfi_cmd(cmd);
3082 if (MR_ValidateMapInfo(sc))
3083 sc->fast_path_io = 0;
3084 else
3085 sc->fast_path_io = 1;
3086 mrsas_sync_map_info(sc);
3087 mtx_unlock(&sc->raidmap_lock);
3088 break;
3089 }
3090#if 0 //currently not supporting event handling, so commenting out
3091 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3092 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
3093 mrsas_poll_wait_aen = 0;
3094 }
3095#endif
3096 /* See if got an event notification */
3097 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
3098 mrsas_complete_aen(sc, cmd);
3099 else
3100 mrsas_wakeup(sc, cmd);
3101 break;
3102 case MFI_CMD_ABORT:
3103 /* Command issued to abort another cmd return */
3104 mrsas_complete_abort(sc, cmd);
3105 break;
3106 default:
3107 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
3108 break;
3109 }
3110}
3111
3112/**
3113 * mrsas_wakeup - Completes an internal command
3114 * input: Adapter soft state
3115 * Command to be completed
3116 *
3117 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3118 * a wait timer is started. This function is called from
3119 * mrsas_complete_mptmfi_passthru() as it completes the command,
3120 * to wake up from the command wait.
3121 */
3122void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3123{
3124 cmd->cmd_status = cmd->frame->io.cmd_status;
3125
3126 if (cmd->cmd_status == ECONNREFUSED)
3127 cmd->cmd_status = 0;
3128
3129 /* For debug only ... */
3130 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3131
3132 sc->chan = (void*)&cmd;
3133 wakeup_one((void *)&sc->chan);
3134 return;
3135}
3136
/**
 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
 * input: Adapter soft state
 *        Shutdown/Hibernate opcode
 *
 * This function issues a DCMD internal command to Firmware to initiate
 * shutdown of the controller.  Outstanding AEN and map-update commands
 * are aborted first so the firmware has no pended internal commands when
 * the shutdown DCMD arrives.
 */
static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    /* Nothing to do if the adapter is already in a fatal state. */
    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
        return;

    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
        return;
    }

    /* Abort the long-lived internal commands before shutting down. */
    if (sc->aen_cmd)
        mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);

    if (sc->map_update_cmd)
        mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);

    /* Build a no-data DCMD frame carrying the shutdown/hibernate opcode. */
    dcmd = &cmd->frame->dcmd;
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 0;
    dcmd->flags = MFI_FRAME_DIR_NONE;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = 0;
    dcmd->opcode = opcode;

    device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");

    /* Wait (blocked) for firmware to acknowledge, then recycle the cmd. */
    mrsas_issue_blocked_cmd(sc, cmd);
    mrsas_release_mfi_cmd(cmd);

    return;
}
3184
3185/**
3186 * mrsas_flush_cache: Requests FW to flush all its caches
3187 * input: Adapter soft state
3188 *
3189 * This function is issues a DCMD internal command to Firmware to initiate
3190 * flushing of all caches.
3191 */
3192static void mrsas_flush_cache(struct mrsas_softc *sc)
3193{
3194 struct mrsas_mfi_cmd *cmd;
3195 struct mrsas_dcmd_frame *dcmd;
3196
3197 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3198 return;
3199
3200 cmd = mrsas_get_mfi_cmd(sc);
3201 if (!cmd) {
3202 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3203 return;
3204 }
3205
3206 dcmd = &cmd->frame->dcmd;
3207 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3208
3209 dcmd->cmd = MFI_CMD_DCMD;
3210 dcmd->cmd_status = 0x0;
3211 dcmd->sge_count = 0;
3212 dcmd->flags = MFI_FRAME_DIR_NONE;
3213 dcmd->timeout = 0;
3214 dcmd->pad_0 = 0;
3215 dcmd->data_xfer_len = 0;
3216 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3217 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3218
3219 mrsas_issue_blocked_cmd(sc, cmd);
3220 mrsas_release_mfi_cmd(cmd);
3221
3222 return;
3223}
3224
3225/**
3226 * mrsas_get_map_info: Load and validate RAID map
3227 * input: Adapter instance soft state
3228 *
3229 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3230 * to load and validate RAID map. It returns 0 if successful, 1 other-
3231 * wise.
3232 */
3233static int mrsas_get_map_info(struct mrsas_softc *sc)
3234{
3235 uint8_t retcode = 0;
3236
3237 sc->fast_path_io = 0;
3238 if (!mrsas_get_ld_map_info(sc)) {
3239 retcode = MR_ValidateMapInfo(sc);
3240 if (retcode == 0) {
3241 sc->fast_path_io = 1;
3242 return 0;
3243 }
3244 }
3245 return 1;
3246}
3247
3248/**
3249 * mrsas_get_ld_map_info: Get FW's ld_map structure
3250 * input: Adapter instance soft state
3251 *
3252 * Issues an internal command (DCMD) to get the FW's controller PD
3253 * list structure.
3254 */
3255static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3256{
3257 int retcode = 0;
3258 struct mrsas_mfi_cmd *cmd;
3259 struct mrsas_dcmd_frame *dcmd;
3260 void *map;
3261 bus_addr_t map_phys_addr = 0;
3262
3263 cmd = mrsas_get_mfi_cmd(sc);
3264 if (!cmd) {
3265 device_printf(sc->mrsas_dev,
3266 "Cannot alloc for ld map info cmd.\n");
3267 return 1;
3268 }
3269
3270 dcmd = &cmd->frame->dcmd;
3271
3272 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3273 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3274 if (!map) {
3275 device_printf(sc->mrsas_dev,
3276 "Failed to alloc mem for ld map info.\n");
3277 mrsas_release_mfi_cmd(cmd);
3278 return (ENOMEM);
3279 }
3280 memset(map, 0, sizeof(sc->max_map_sz));
3281 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3282
3283 dcmd->cmd = MFI_CMD_DCMD;
3284 dcmd->cmd_status = 0xFF;
3285 dcmd->sge_count = 1;
3286 dcmd->flags = MFI_FRAME_DIR_READ;
3287 dcmd->timeout = 0;
3288 dcmd->pad_0 = 0;
3289 dcmd->data_xfer_len = sc->current_map_sz;
3290 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3291 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3292 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3293
3294 if (!mrsas_issue_polled(sc, cmd))
3295 retcode = 0;
3296 else
3297 {
3298 device_printf(sc->mrsas_dev,
3299 "Fail to send get LD map info cmd.\n");
3300 retcode = 1;
3301 }
3302 mrsas_release_mfi_cmd(cmd);
3303
3304 return(retcode);
3305}
3306
3307/**
3308 * mrsas_sync_map_info: Get FW's ld_map structure
3309 * input: Adapter instance soft state
3310 *
3311 * Issues an internal command (DCMD) to get the FW's controller PD
3312 * list structure.
3313 */
3314static int mrsas_sync_map_info(struct mrsas_softc *sc)
3315{
3316 int retcode = 0, i;
3317 struct mrsas_mfi_cmd *cmd;
3318 struct mrsas_dcmd_frame *dcmd;
3319 uint32_t size_sync_info, num_lds;
3320 MR_LD_TARGET_SYNC *target_map = NULL;
3321 MR_DRV_RAID_MAP_ALL *map;
3322 MR_LD_RAID *raid;
3323 MR_LD_TARGET_SYNC *ld_sync;
3324 bus_addr_t map_phys_addr = 0;
3325
3326 cmd = mrsas_get_mfi_cmd(sc);
3327 if (!cmd) {
3328 device_printf(sc->mrsas_dev,
3329 "Cannot alloc for sync map info cmd\n");
3330 return 1;
3331 }
3332
3333 map = sc->ld_drv_map[sc->map_id & 1];
3334 num_lds = map->raidMap.ldCount;
3335
3336 dcmd = &cmd->frame->dcmd;
3337 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3338 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3339
3340 target_map =
3341 (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3342 memset(target_map, 0, sc->max_map_sz);
3343
3344 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3345
3346 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3347
3348 for (i = 0; i < num_lds; i++, ld_sync++) {
3349 raid = MR_LdRaidGet(i, map);
3350 ld_sync->targetId = MR_GetLDTgtId(i, map);
3351 ld_sync->seqNum = raid->seqNum;
3352 }
3353
3354 dcmd->cmd = MFI_CMD_DCMD;
3355 dcmd->cmd_status = 0xFF;
3356 dcmd->sge_count = 1;
3357 dcmd->flags = MFI_FRAME_DIR_WRITE;
3358 dcmd->timeout = 0;
3359 dcmd->pad_0 = 0;
3360 dcmd->data_xfer_len = sc->current_map_sz;
3361 dcmd->mbox.b[0] = num_lds;
3362 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3363 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3364 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3365 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3366
3367 sc->map_update_cmd = cmd;
3368 if (mrsas_issue_dcmd(sc, cmd)) {
3369 device_printf(sc->mrsas_dev,
3370 "Fail to send sync map info command.\n");
3371 return(1);
3372 }
3373 return(retcode);
3374}
3375
3376/**
3377 * mrsas_get_pd_list: Returns FW's PD list structure
3378 * input: Adapter soft state
3379 *
3380 * Issues an internal command (DCMD) to get the FW's controller PD
3381 * list structure. This information is mainly used to find out about
3382 * system supported by Firmware.
3383 */
3384static int mrsas_get_pd_list(struct mrsas_softc *sc)
3385{
3386 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3387 struct mrsas_mfi_cmd *cmd;
3388 struct mrsas_dcmd_frame *dcmd;
3389 struct MR_PD_LIST *pd_list_mem;
3390 struct MR_PD_ADDRESS *pd_addr;
3391 bus_addr_t pd_list_phys_addr = 0;
3392 struct mrsas_tmp_dcmd *tcmd;
3393
3394 cmd = mrsas_get_mfi_cmd(sc);
3395 if (!cmd) {
3396 device_printf(sc->mrsas_dev,
3397 "Cannot alloc for get PD list cmd\n");
3398 return 1;
3399 }
3400
3401 dcmd = &cmd->frame->dcmd;
3402
3403 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3404 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3405 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3406 device_printf(sc->mrsas_dev,
3407 "Cannot alloc dmamap for get PD list cmd\n");
3408 mrsas_release_mfi_cmd(cmd);
3409 return(ENOMEM);
3410 }
3411 else {
3412 pd_list_mem = tcmd->tmp_dcmd_mem;
3413 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3414 }
3415 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3416
3417 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3418 dcmd->mbox.b[1] = 0;
3419 dcmd->cmd = MFI_CMD_DCMD;
3420 dcmd->cmd_status = 0xFF;
3421 dcmd->sge_count = 1;
3422 dcmd->flags = MFI_FRAME_DIR_READ;
3423 dcmd->timeout = 0;
3424 dcmd->pad_0 = 0;
3425 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3426 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3427 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3428 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3429
3430 if (!mrsas_issue_polled(sc, cmd))
3431 retcode = 0;
3432 else
3433 retcode = 1;
3434
3435 /* Get the instance PD list */
3436 pd_count = MRSAS_MAX_PD;
3437 pd_addr = pd_list_mem->addr;
3438 if (retcode == 0 && pd_list_mem->count < pd_count) {
3439 memset(sc->local_pd_list, 0,
3440 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3441 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3442 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3443 sc->local_pd_list[pd_addr->deviceId].driveType =
3444 pd_addr->scsiDevType;
3445 sc->local_pd_list[pd_addr->deviceId].driveState =
3446 MR_PD_STATE_SYSTEM;
3447 pd_addr++;
3448 }
3449 }
3450
3451 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3452 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3453 mrsas_free_tmp_dcmd(tcmd);
3454 mrsas_release_mfi_cmd(cmd);
3455 free(tcmd, M_MRSAS);
3456 return(retcode);
3457}
3458
3459/**
3460 * mrsas_get_ld_list: Returns FW's LD list structure
3461 * input: Adapter soft state
3462 *
3463 * Issues an internal command (DCMD) to get the FW's controller PD
3464 * list structure. This information is mainly used to find out about
3465 * supported by the FW.
3466 */
3467static int mrsas_get_ld_list(struct mrsas_softc *sc)
3468{
3469 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3470 struct mrsas_mfi_cmd *cmd;
3471 struct mrsas_dcmd_frame *dcmd;
3472 struct MR_LD_LIST *ld_list_mem;
3473 bus_addr_t ld_list_phys_addr = 0;
3474 struct mrsas_tmp_dcmd *tcmd;
3475
3476 cmd = mrsas_get_mfi_cmd(sc);
3477 if (!cmd) {
3478 device_printf(sc->mrsas_dev,
3479 "Cannot alloc for get LD list cmd\n");
3480 return 1;
3481 }
3482
3483 dcmd = &cmd->frame->dcmd;
3484
3485 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3486 ld_list_size = sizeof(struct MR_LD_LIST);
3487 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3488 device_printf(sc->mrsas_dev,
3489 "Cannot alloc dmamap for get LD list cmd\n");
3490 mrsas_release_mfi_cmd(cmd);
3491 return(ENOMEM);
3492 }
3493 else {
3494 ld_list_mem = tcmd->tmp_dcmd_mem;
3495 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3496 }
3497 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3498
3499 if (sc->max256vdSupport)
3500 dcmd->mbox.b[0]=1;
3501
3502 dcmd->cmd = MFI_CMD_DCMD;
3503 dcmd->cmd_status = 0xFF;
3504 dcmd->sge_count = 1;
3505 dcmd->flags = MFI_FRAME_DIR_READ;
3506 dcmd->timeout = 0;
3507 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3508 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3509 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3510 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3511 dcmd->pad_0 = 0;
3512
3513 if (!mrsas_issue_polled(sc, cmd))
3514 retcode = 0;
3515 else
3516 retcode = 1;
3517
3518#if VD_EXT_DEBUG
3519 printf ("Number of LDs %d\n", ld_list_mem->ldCount);
3520#endif
3521
3522 /* Get the instance LD list */
3523 if ((retcode == 0) &&
3524 (ld_list_mem->ldCount <= sc->fw_supported_vd_count)){
3525 sc->CurLdCount = ld_list_mem->ldCount;
3526 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3527 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3528 if (ld_list_mem->ldList[ld_index].state != 0) {
3529 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3530 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3531 }
3532 }
3533 }
3534
3535 mrsas_free_tmp_dcmd(tcmd);
3536 mrsas_release_mfi_cmd(cmd);
3537 free(tcmd, M_MRSAS);
3538 return(retcode);
3539}
3540
3541/**
3542 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3543 * input: Adapter soft state
3544 * Temp command
3545 * Size of alloction
3546 *
3547 * Allocates DMAable memory for a temporary internal command. The allocated
3548 * memory is initialized to all zeros upon successful loading of the dma
3549 * mapped memory.
3550 */
3551int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3552 int size)
3553{
3554 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3555 1, 0, // algnmnt, boundary
3556 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3557 BUS_SPACE_MAXADDR, // highaddr
3558 NULL, NULL, // filter, filterarg
3559 size, // maxsize
3560 1, // msegments
3561 size, // maxsegsize
3562 BUS_DMA_ALLOCNOW, // flags
3563 NULL, NULL, // lockfunc, lockarg
3564 &tcmd->tmp_dcmd_tag)) {
3565 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3566 return (ENOMEM);
3567 }
3568 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3569 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3570 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3571 return (ENOMEM);
3572 }
3573 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3574 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3575 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3576 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3577 return (ENOMEM);
3578 }
3579
3580 memset(tcmd->tmp_dcmd_mem, 0, size);
3581 return (0);
3582}
3583
3584/**
3585 * mrsas_free_tmp_dcmd: Free memory for temporary command
3586 * input: temporary dcmd pointer
3587 *
3588 * Deallocates memory of the temporary command for use in the construction
3589 * of the internal DCMD.
3590 */
3591void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3592{
3593 if (tmp->tmp_dcmd_phys_addr)
3594 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3595 if (tmp->tmp_dcmd_mem != NULL)
3596 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3597 if (tmp->tmp_dcmd_tag != NULL)
3598 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3599}
3600
3601/**
3602 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3603 * input: Adapter soft state
3604 * Previously issued cmd to be aborted
3605 *
3606 * This function is used to abort previously issued commands, such as AEN and
3607 * RAID map sync map commands. The abort command is sent as a DCMD internal
3608 * command and subsequently the driver will wait for a return status. The
3609 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3610 */
3611static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3612 struct mrsas_mfi_cmd *cmd_to_abort)
3613{
3614 struct mrsas_mfi_cmd *cmd;
3615 struct mrsas_abort_frame *abort_fr;
3616 u_int8_t retcode = 0;
3617 unsigned long total_time = 0;
3618 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3619
3620 cmd = mrsas_get_mfi_cmd(sc);
3621 if (!cmd) {
3622 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3623 return(1);
3624 }
3625
3626 abort_fr = &cmd->frame->abort;
3627
3628 /* Prepare and issue the abort frame */
3629 abort_fr->cmd = MFI_CMD_ABORT;
3630 abort_fr->cmd_status = 0xFF;
3631 abort_fr->flags = 0;
3632 abort_fr->abort_context = cmd_to_abort->index;
3633 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3634 abort_fr->abort_mfi_phys_addr_hi = 0;
3635
3636 cmd->sync_cmd = 1;
3637 cmd->cmd_status = 0xFF;
3638
3639 if (mrsas_issue_dcmd(sc, cmd)) {
3640 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3641 return(1);
3642 }
3643
3644 /* Wait for this cmd to complete */
3645 sc->chan = (void*)&cmd;
3646 while (1) {
3647 if (cmd->cmd_status == 0xFF){
3648 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3649 }
3650 else
3651 break;
3652 total_time++;
3653 if (total_time >= max_wait) {
3654 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3655 retcode = 1;
3656 break;
3657 }
3658 }
3659
3660 cmd->sync_cmd = 0;
3661 mrsas_release_mfi_cmd(cmd);
3662 return(retcode);
3663}
3664
3665/**
3666 * mrsas_complete_abort: Completes aborting a command
3667 * input: Adapter soft state
3668 * Cmd that was issued to abort another cmd
3669 *
3670 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3671 * to change after sending the command. This function is called from
3672 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3673 */
3674void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3675{
3676 if (cmd->sync_cmd) {
3677 cmd->sync_cmd = 0;
3678 cmd->cmd_status = 0;
3679 sc->chan = (void*)&cmd;
3680 wakeup_one((void *)&sc->chan);
3681 }
3682 return;
3683}
3684
3685/**
3686 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3687 * input: Adapter soft state
3688 *
3689 */
3690void mrsas_aen_handler(struct mrsas_softc *sc)
3691{
3692 union mrsas_evt_class_locale class_locale;
3693 int doscan = 0;
3694 u_int32_t seq_num;
3695 int error;
3696
3697 if (!sc) {
3698 device_printf(sc->mrsas_dev, "invalid instance!\n");
3699 return;
3700 }
3701
3702 if (sc->evt_detail_mem) {
3703 switch (sc->evt_detail_mem->code) {
3704 case MR_EVT_PD_INSERTED:
3705 mrsas_get_pd_list(sc);
3706 mrsas_bus_scan_sim(sc, sc->sim_1);
3707 doscan = 0;
3708 break;
3709 case MR_EVT_PD_REMOVED:
3710 mrsas_get_pd_list(sc);
3711 mrsas_bus_scan_sim(sc, sc->sim_1);
3712 doscan = 0;
3713 break;
3714 case MR_EVT_LD_OFFLINE:
3715 case MR_EVT_CFG_CLEARED:
3716 case MR_EVT_LD_DELETED:
3717 mrsas_bus_scan_sim(sc, sc->sim_0);
3718 doscan = 0;
3719 break;
3720 case MR_EVT_LD_CREATED:
3721 mrsas_get_ld_list(sc);
3722 mrsas_bus_scan_sim(sc, sc->sim_0);
3723 doscan = 0;
3724 break;
3725 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3726 case MR_EVT_FOREIGN_CFG_IMPORTED:
3727 case MR_EVT_LD_STATE_CHANGE:
3728 doscan = 1;
3729 break;
3730 default:
3731 doscan = 0;
3732 break;
3733 }
3734 } else {
3735 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3736 return;
3737 }
3738 if (doscan) {
3739 mrsas_get_pd_list(sc);
3740 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3741 mrsas_bus_scan_sim(sc, sc->sim_1);
3742 mrsas_get_ld_list(sc);
3743 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3744 mrsas_bus_scan_sim(sc, sc->sim_0);
3745 }
3746
3747 seq_num = sc->evt_detail_mem->seq_num + 1;
3748
3749 // Register AEN with FW for latest sequence number plus 1
3750 class_locale.members.reserved = 0;
3751 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3752 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3753
3754 if (sc->aen_cmd != NULL )
3755 return ;
3756
3757 mtx_lock(&sc->aen_lock);
3758 error = mrsas_register_aen(sc, seq_num,
3759 class_locale.word);
3760 mtx_unlock(&sc->aen_lock);
3761
3762 if (error)
3763 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3764
3765}
3766
3767
3768/**
3769 * mrsas_complete_aen: Completes AEN command
3770 * input: Adapter soft state
3771 * Cmd that was issued to abort another cmd
3772 *
3773 * This function will be called from ISR and will continue
3774 * event processing from thread context by enqueuing task
3775 * in ev_tq (callback function "mrsas_aen_handler").
3776 */
3777void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3778{
3779 /*
3780 * Don't signal app if it is just an aborted previously registered aen
3781 */
3782 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3783 /* TO DO (?) */
3784 }
3785 else
3786 cmd->abort_aen = 0;
3787
3788 sc->aen_cmd = NULL;
3789 mrsas_release_mfi_cmd(cmd);
3790
3791 if (!sc->remove_in_progress)
3792 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3793
3794 return;
3795}
3796
/* Newbus device method table wiring the driver entry points. */
static device_method_t mrsas_methods[] = {
    DEVMETHOD(device_probe,     mrsas_probe),
    DEVMETHOD(device_attach,    mrsas_attach),
    DEVMETHOD(device_detach,    mrsas_detach),
    DEVMETHOD(device_suspend,   mrsas_suspend),
    DEVMETHOD(device_resume,    mrsas_resume),
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    { 0, 0 }    /* terminator */
};

/* Driver descriptor: name, methods, and softc size for newbus. */
static driver_t mrsas_driver = {
    "mrsas",
    mrsas_methods,
    sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;
/* Register on the PCI bus; the driver depends on the CAM subsystem. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1,1,1);
3817