Deleted Added
full compact
1/*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
34 *
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37 *
38 */
39
40#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mrsas/mrsas.c 299670 2016-05-13 12:18:12Z kadesai $");
42
43#include <dev/mrsas/mrsas.h>
44#include <dev/mrsas/mrsas_ioctl.h>
45
46#include <cam/cam.h>
47#include <cam/cam_ccb.h>
48
49#include <sys/sysctl.h>
50#include <sys/types.h>
51#include <sys/sysent.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/smp.h>
55
56
57/*
58 * Function prototypes
59 */
60static d_open_t mrsas_open;
61static d_close_t mrsas_close;
62static d_read_t mrsas_read;
63static d_write_t mrsas_write;
64static d_ioctl_t mrsas_ioctl;
65static d_poll_t mrsas_poll;
66
67static void mrsas_ich_startup(void *arg);
68static struct mrsas_mgmt_info mrsas_mgmt_info;
69static struct mrsas_ident *mrsas_find_ident(device_t);
70static int mrsas_setup_msix(struct mrsas_softc *sc);
71static int mrsas_allocate_msix(struct mrsas_softc *sc);
72static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73static void mrsas_flush_cache(struct mrsas_softc *sc);
74static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75static void mrsas_ocr_thread(void *arg);
76static int mrsas_get_map_info(struct mrsas_softc *sc);
77static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78static int mrsas_sync_map_info(struct mrsas_softc *sc);
79static int mrsas_get_pd_list(struct mrsas_softc *sc);
80static int mrsas_get_ld_list(struct mrsas_softc *sc);
81static int mrsas_setup_irq(struct mrsas_softc *sc);
82static int mrsas_alloc_mem(struct mrsas_softc *sc);
83static int mrsas_init_fw(struct mrsas_softc *sc);
84static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
88static int mrsas_clear_intr(struct mrsas_softc *sc);
89static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
90static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91static int
92mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
93 struct mrsas_mfi_cmd *cmd_to_abort);
94static struct mrsas_softc *
95mrsas_get_softc_instance(struct cdev *dev,
96 u_long cmd, caddr_t arg);
97u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98u_int8_t
99mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
100 struct mrsas_mfi_cmd *mfi_cmd);
101void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
102int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
103int mrsas_init_adapter(struct mrsas_softc *sc);
104int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
105int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
106int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
107int mrsas_ioc_init(struct mrsas_softc *sc);
108int mrsas_bus_scan(struct mrsas_softc *sc);
109int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
111int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
112int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
113int
114mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
115 struct mrsas_mfi_cmd *cmd);
116int
117mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
118 int size);
119void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
120void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
121void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123void mrsas_disable_intr(struct mrsas_softc *sc);
124void mrsas_enable_intr(struct mrsas_softc *sc);
125void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
126void mrsas_free_mem(struct mrsas_softc *sc);
127void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
128void mrsas_isr(void *arg);
129void mrsas_teardown_intr(struct mrsas_softc *sc);
130void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
131void mrsas_kill_hba(struct mrsas_softc *sc);
132void mrsas_aen_handler(struct mrsas_softc *sc);
133void
134mrsas_write_reg(struct mrsas_softc *sc, int offset,
135 u_int32_t value);
136void
137mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
138 u_int32_t req_desc_hi);
139void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
140void
141mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
142 struct mrsas_mfi_cmd *cmd, u_int8_t status);
143void
144mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
145 u_int8_t extStatus);
146struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
147
148MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
150
151extern int mrsas_cam_attach(struct mrsas_softc *sc);
152extern void mrsas_cam_detach(struct mrsas_softc *sc);
153extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
157extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163extern void mrsas_xpt_release(struct mrsas_softc *sc);
164extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165mrsas_get_request_desc(struct mrsas_softc *sc,
166 u_int16_t index);
167extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170
171SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
172
173/*
174 * PCI device struct and table
175 *
176 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subvendor ID; 0xffff matches any (see mrsas_find_ident) */
	uint16_t subdevice;	/* PCI subdevice ID; 0xffff matches any (see mrsas_find_ident) */
	const char *desc;	/* human-readable controller description */
} MRSAS_CTLR_ID;
184
/*
 * Controllers supported by this driver, matched in mrsas_find_ident().
 * The all-zero entry is the table terminator.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0, 0, 0, 0, NULL}
};
193
194/*
195 * Character device entry points
196 *
197 */
/* Character device switch: entry points behind the /dev/mrsas<unit> node. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
208
209MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
210
211/*
212 * In the cdevsw routines, we find our softc by using the si_drv1 member of
213 * struct cdev. We set this variable to point to our softc in our attach
214 * routine when we create the /dev entry.
215 */
216int
217mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
218{
219 struct mrsas_softc *sc;
220
221 sc = dev->si_drv1;
222 return (0);
223}
224
225int
226mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
227{
228 struct mrsas_softc *sc;
229
230 sc = dev->si_drv1;
231 return (0);
232}
233
234int
235mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
236{
237 struct mrsas_softc *sc;
238
239 sc = dev->si_drv1;
240 return (0);
241}
242int
243mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
244{
245 struct mrsas_softc *sc;
246
247 sc = dev->si_drv1;
248 return (0);
249}
250
251/*
252 * Register Read/Write Functions
253 *
254 */
255void
256mrsas_write_reg(struct mrsas_softc *sc, int offset,
257 u_int32_t value)
258{
259 bus_space_tag_t bus_tag = sc->bus_tag;
260 bus_space_handle_t bus_handle = sc->bus_handle;
261
262 bus_space_write_4(bus_tag, bus_handle, offset, value);
263}
264
265u_int32_t
266mrsas_read_reg(struct mrsas_softc *sc, int offset)
267{
268 bus_space_tag_t bus_tag = sc->bus_tag;
269 bus_space_handle_t bus_handle = sc->bus_handle;
270
271 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
272}
273
274
275/*
276 * Interrupt Disable/Enable/Clear Functions
277 *
278 */
279void
280mrsas_disable_intr(struct mrsas_softc *sc)
281{
282 u_int32_t mask = 0xFFFFFFFF;
283 u_int32_t status;
284
285 sc->mask_interrupts = 1;
286 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
287 /* Dummy read to force pci flush */
288 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
289}
290
291void
292mrsas_enable_intr(struct mrsas_softc *sc)
293{
294 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
295 u_int32_t status;
296
297 sc->mask_interrupts = 0;
298 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
299 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
300
301 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
302 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
303}
304
305static int
306mrsas_clear_intr(struct mrsas_softc *sc)
307{
308 u_int32_t status, fw_status, fw_state;
309
310 /* Read received interrupt */
311 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
312
313 /*
314 * If FW state change interrupt is received, write to it again to
315 * clear
316 */
317 if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
318 fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
319 outbound_scratch_pad));
320 fw_state = fw_status & MFI_STATE_MASK;
321 if (fw_state == MFI_STATE_FAULT) {
322 device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
323 if (sc->ocr_thread_active)
324 wakeup(&sc->ocr_chan);
325 }
326 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
327 mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
328 return (1);
329 }
330 /* Not our interrupt, so just return */
331 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
332 return (0);
333
334 /* We got a reply interrupt */
335 return (1);
336}
337
338/*
339 * PCI Support Functions
340 *
341 */
342static struct mrsas_ident *
343mrsas_find_ident(device_t dev)
344{
345 struct mrsas_ident *pci_device;
346
347 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
348 if ((pci_device->vendor == pci_get_vendor(dev)) &&
349 (pci_device->device == pci_get_device(dev)) &&
350 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
351 (pci_device->subvendor == 0xffff)) &&
352 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
353 (pci_device->subdevice == 0xffff)))
354 return (pci_device);
355 }
356 return (NULL);
357}
358
359static int
360mrsas_probe(device_t dev)
361{
362 static u_int8_t first_ctrl = 1;
363 struct mrsas_ident *id;
364
365 if ((id = mrsas_find_ident(dev)) != NULL) {
366 if (first_ctrl) {
367 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
368 MRSAS_VERSION);
369 first_ctrl = 0;
370 }
371 device_set_desc(dev, id->desc);
372 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
373 return (-30);
374 }
375 return (ENXIO);
376}
377
378/*
379 * mrsas_setup_sysctl: setup sysctl values for mrsas
380 * input: Adapter instance soft state
381 *
382 * Setup sysctl entries for mrsas driver.
383 */
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:	Adapter instance soft state
 *
 * Registers the driver's sysctl tree (debug level, OCR controls and
 * various read-only counters) under the device's sysctl node, or under
 * a private hw.mrsas.<unit> node if newbus did not supply one.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a driver-private context/tree when none exists. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
451
452/*
453 * mrsas_get_tunables: get tunable parameters.
454 * input: Adapter instance soft state
455 *
456 * Get tunable parameters. This will help to debug driver at boot time.
457 */
458static void
459mrsas_get_tunables(struct mrsas_softc *sc)
460{
461 char tmpstr[80];
462
463 /* XXX default to some debugging for now */
464 sc->mrsas_debug = MRSAS_FAULT;
465 sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
466 sc->mrsas_fw_fault_check_delay = 1;
467 sc->reset_count = 0;
468 sc->reset_in_progress = 0;
469
470 /*
471 * Grab the global variables.
472 */
473 TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
474
475 /*
476 * Grab the global variables.
477 */
478 TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);
479
480 /* Grab the unit-instance variables */
481 snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
482 device_get_unit(sc->mrsas_dev));
483 TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
484}
485
486/*
487 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
488 * Used to get sequence number at driver load time.
489 * input: Adapter soft state
490 *
491 * Allocates DMAable memory for the event log info internal command.
492 */
/*
 * mrsas_alloc_evt_log_info cmd:	Allocates memory to get event log information.
 *	Used to get sequence number at driver load time.
 * input:	Adapter soft state
 *
 * Creates a DMA tag, allocates a DMAable buffer and loads it, storing
 * tag/map/vaddr/paddr in the softc (el_info_*).  Freed again by
 * mrsas_free_evt_log_info_cmd().
 *
 * NOTE(review): on a mid-sequence failure the pieces allocated so far
 * are left in the softc rather than freed here — presumably the caller
 * is expected to invoke mrsas_free_evt_log_info_cmd(); confirm callers
 * do so on the error path.
 */
int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
{
	int el_info_size;

	/* Allocate get event log info command */
	el_info_size = sizeof(struct mrsas_evt_log_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: 32-bit DMA addresses */
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    el_info_size,		/* maxsize */
	    1,				/* single segment */
	    el_info_size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->el_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->el_info_mem, 0, el_info_size);
	return (0);
}
528
529/*
530 * mrsas_free_evt_info_cmd: Free memory for Event log info command
531 * input: Adapter soft state
532 *
533 * Deallocates memory for the event log info internal command.
534 */
535void
536mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
537{
538 if (sc->el_info_phys_addr)
539 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
540 if (sc->el_info_mem != NULL)
541 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
542 if (sc->el_info_tag != NULL)
543 bus_dma_tag_destroy(sc->el_info_tag);
544}
545
546/*
547 * mrsas_get_seq_num: Get latest event sequence number
548 * @sc: Adapter soft state
549 * @eli: Firmware event log sequence number information.
550 *
551 * Firmware maintains a log of all events in a non-volatile area.
552 * Driver get the sequence number using DCMD
553 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
554 */
555
/*
 * mrsas_get_seq_num:	Get latest event sequence number
 * @sc:			Adapter soft state
 * @eli:		Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * On DCMD timeout the MFI command is deliberately NOT released (the
 * firmware may still complete it) and an OCR is requested instead.
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (single-SGE read). */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
609
610
611/*
612 * mrsas_register_aen: Register for asynchronous event notification
613 * @sc: Adapter soft state
614 * @seq_num: Starting sequence number
615 * @class_locale: Class of the event
616 *
617 * This function subscribes for events beyond the @seq_num
618 * and type @class_locale.
619 *
620 */
/*
 * mrsas_register_aen:	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale_word:	Class/locale of the event (packed word)
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * NOTE(review): re-checking aen_cmd here looks like a guard against
	 * another registration having raced in after the abort above —
	 * confirm against the locking model.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
729
730/*
731 * mrsas_start_aen: Subscribes to AEN during driver load time
732 * @instance: Adapter soft state
733 */
734static int
735mrsas_start_aen(struct mrsas_softc *sc)
736{
737 struct mrsas_evt_log_info eli;
738 union mrsas_evt_class_locale class_locale;
739
740
741 /* Get the latest sequence number from FW */
742
743 memset(&eli, 0, sizeof(eli));
744
745 if (mrsas_get_seq_num(sc, &eli))
746 return -1;
747
748 /* Register AEN with FW for latest sequence number plus 1 */
749 class_locale.members.reserved = 0;
750 class_locale.members.locale = MR_EVT_LOCALE_ALL;
751 class_locale.members.class = MR_EVT_CLASS_DEBUG;
752
753 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
754 class_locale.word);
755
756}
757
758/*
759 * mrsas_setup_msix: Allocate MSI-x vectors
760 * @sc: adapter soft state
761 */
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 *
 * For each of the msix_vectors already granted by the PCI layer,
 * allocates the IRQ resource and hooks mrsas_isr with a per-vector
 * context.  On any failure everything set up so far is torn down via
 * mrsas_teardown_intr().
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource IDs are 1-based. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}
794
795/*
796 * mrsas_allocate_msix: Setup MSI-x vectors
797 * @sc: adapter soft state
798 */
799static int
800mrsas_allocate_msix(struct mrsas_softc *sc)
801{
802 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
803 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
804 " of vectors\n", sc->msix_vectors);
805 } else {
806 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
807 goto irq_alloc_failed;
808 }
809 return SUCCESS;
810
811irq_alloc_failed:
812 mrsas_teardown_intr(sc);
813 return (FAIL);
814}
815
816/*
817 * mrsas_attach: PCI entry point
818 * input: pointer to device struct
819 *
820 * Performs setup of PCI and registers, initializes mutexes and linked lists,
821 * registers interrupts and CAM, and initializes the adapter/controller to
822 * its proper state.
823 */
/*
 * mrsas_attach:	PCI entry point
 * input:	pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.  cdev creation and AEN setup are deferred to the
 * config-intrhook callback (mrsas_ich_startup).  Failure labels unwind
 * in strict reverse order of setup.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* NOTE(review): 'bar' is read but never used — confirm intent. */
	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Start the online-controller-reset (OCR) watchdog thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
940
941/*
942 * Interrupt config hook
943 */
/*
 * mrsas_ich_startup:	Interrupt config hook, run once interrupts work.
 * @arg:		Adapter soft state (set up in mrsas_attach)
 *
 * Creates the /dev node, registers the controller in mrsas_mgmt_info,
 * enables interrupts and kicks off AEN registration, then
 * disestablishes itself.
 */
static void
mrsas_ich_startup(void *arg)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema,
	    MRSAS_MAX_MFI_CMDS - 5,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 also exposes the Linux-compatible ioctl node alias. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* One-shot hook: remove ourselves once startup has run. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
997
/*
 * mrsas_detach: De-allocates and teardown resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.  The teardown order matters:
 * the cdevs are destroyed first so no new IOCTLs arrive, any in-progress
 * OCR (online controller reset) is drained, the firmware is shut down,
 * and only then are interrupts, CAM, memory and locks released.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Flag checked by mrsas_ioctl() to reject new requests. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread; the loops below wait for it to wind down. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* First wait out any reset currently in progress (1s poll). */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush controller cache and shut the firmware down cleanly. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1084
/*
 * mrsas_free_mem: Frees allocated memory
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_detach() to free previously allocated
 * memory.  It is also reached from the attach failure path, so each release
 * is individually guarded: fields that were never allocated are still
 * NULL/0 and are skipped.  For every DMA buffer the release order is
 * unload map -> free memory -> destroy tag.
 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory (ping/pong pair of FW maps plus the
	 * driver-local decoded maps)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/* Free the JBOD sequence map pair the same way. */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			/*
			 * NOTE(review): assumes every slot was populated at
			 * allocation time and mrsas_free_frame() tolerates a
			 * partially initialized command — confirm against
			 * the allocation path.
			 */
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (last: all child tags are destroyed above)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1237
1238/*
1239 * mrsas_teardown_intr: Teardown interrupt
1240 * input: Adapter instance soft state
1241 *
1242 * This function is called from mrsas_detach() to teardown and release bus
1243 * interrupt resourse.
1244 */
1245void
1246mrsas_teardown_intr(struct mrsas_softc *sc)
1247{
1248 int i;
1249
1250 if (!sc->msix_enable) {
1251 if (sc->intr_handle[0])
1252 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1253 if (sc->mrsas_irq[0] != NULL)
1254 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1255 sc->irq_id[0], sc->mrsas_irq[0]);
1256 sc->intr_handle[0] = NULL;
1257 } else {
1258 for (i = 0; i < sc->msix_vectors; i++) {
1259 if (sc->intr_handle[i])
1260 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1261 sc->intr_handle[i]);
1262
1263 if (sc->mrsas_irq[i] != NULL)
1264 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1265 sc->irq_id[i], sc->mrsas_irq[i]);
1266
1267 sc->intr_handle[i] = NULL;
1268 }
1269 pci_release_msi(sc->mrsas_dev);
1270 }
1271
1272}
1273
1274/*
1275 * mrsas_suspend: Suspend entry point
1276 * input: Device struct pointer
1277 *
1278 * This function is the entry point for system suspend from the OS.
1279 */
1280static int
1281mrsas_suspend(device_t dev)
1282{
1283 struct mrsas_softc *sc;
1284
1285 sc = device_get_softc(dev);
1286 return (0);
1287}
1288
1289/*
1290 * mrsas_resume: Resume entry point
1291 * input: Device struct pointer
1292 *
1293 * This function is the entry point for system resume from the OS.
1294 */
1295static int
1296mrsas_resume(device_t dev)
1297{
1298 struct mrsas_softc *sc;
1299
1300 sc = device_get_softc(dev);
1301 return (0);
1302}
1303
1304/**
1305 * mrsas_get_softc_instance: Find softc instance based on cmd type
1306 *
1307 * This function will return softc instance based on cmd type.
1308 * In some case, application fire ioctl on required management instance and
1309 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1310 * case, else get the softc instance from host_no provided by application in
1311 * user data.
1312 */
1313
1314static struct mrsas_softc *
1315mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1316{
1317 struct mrsas_softc *sc = NULL;
1318 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1319
1320 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1321 sc = dev->si_drv1;
1322 } else {
1323 /*
1324 * get the Host number & the softc from data sent by the
1325 * Application
1326 */
1327 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1328 if (sc == NULL)
1329 printf("There is no Controller number %d\n",
1330 user_ioc->host_no);
1331 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1332 mrsas_dprint(sc, MRSAS_FAULT,
1333 "Invalid Controller number %d\n", user_ioc->host_no);
1334 }
1335
1336 return sc;
1337}
1338
/*
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 * Returns ENOENT when no matching controller exists, when the driver is
 * being removed, or for an unknown command; otherwise the sub-handler's
 * return value.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Reject new IOCTLs once detach/shutdown has started. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/* Fast path: no reset in progress, go straight to the command. */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* An OCR is running: sleep one second at a time until it finishes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location to the caller. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1421
1422/*
1423 * mrsas_poll: poll entry point for mrsas driver fd
1424 *
1425 * This function is the entry point for poll from the OS. It waits for some AEN
1426 * events to be triggered from the controller and notifies back.
1427 */
1428static int
1429mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1430{
1431 struct mrsas_softc *sc;
1432 int revents = 0;
1433
1434 sc = dev->si_drv1;
1435
1436 if (poll_events & (POLLIN | POLLRDNORM)) {
1437 if (sc->mrsas_aen_triggered) {
1438 revents |= poll_events & (POLLIN | POLLRDNORM);
1439 }
1440 }
1441 if (revents == 0) {
1442 if (poll_events & (POLLIN | POLLRDNORM)) {
1443 mtx_lock(&sc->aen_lock);
1444 sc->mrsas_poll_waiting = 1;
1445 selrecord(td, &sc->mrsas_select);
1446 mtx_unlock(&sc->aen_lock);
1447 }
1448 }
1449 return revents;
1450}
1451
1452/*
1453 * mrsas_setup_irq: Set up interrupt
1454 * input: Adapter instance soft state
1455 *
1456 * This function sets up interrupts as a bus resource, with flags indicating
1457 * resource permitting contemporaneous sharing and for resource to activate
1458 * atomically.
1459 */
1460static int
1461mrsas_setup_irq(struct mrsas_softc *sc)
1462{
1463 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1464 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1465
1466 else {
1467 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1468 sc->irq_context[0].sc = sc;
1469 sc->irq_context[0].MSIxIndex = 0;
1470 sc->irq_id[0] = 0;
1471 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1472 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1473 if (sc->mrsas_irq[0] == NULL) {
1474 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1475 "interrupt\n");
1476 return (FAIL);
1477 }
1478 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1479 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1480 &sc->irq_context[0], &sc->intr_handle[0])) {
1481 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1482 "interrupt\n");
1483 return (FAIL);
1484 }
1485 }
1486 return (0);
1487}
1488
/*
 * mrsas_isr: ISR entry point
 * input: argument pointer
 *
 * This function is the interrupt service routine entry point. There are two
 * types of interrupts, state change interrupt and response interrupt. If an
 * interrupt is not ours, we just return.
 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are administratively masked (e.g. during reset). */
	if (sc->mask_interrupts)
		return;

	/*
	 * Legacy INTx path (no MSI-X vectors): the line is allocated
	 * RF_SHAREABLE, so ack and bail out if the interrupt isn't ours.
	 */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1524
1525/*
1526 * mrsas_complete_cmd: Process reply request
1527 * input: Adapter instance soft state
1528 *
1529 * This function is called from mrsas_isr() to process reply request and clear
1530 * response interrupt. Processing of the reply request entails walking
1531 * through the reply descriptor array for the command request pended from
1532 * Firmware. We look at the Function field to determine the command type and
1533 * perform the appropriate action. Before we return, we clear the response
1534 * interrupt.
1535 */
1536static int
1537mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
1538{
1539 Mpi2ReplyDescriptorsUnion_t *desc;
1540 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1541 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
1542 struct mrsas_mpt_cmd *cmd_mpt;
1543 struct mrsas_mfi_cmd *cmd_mfi;
1544 u_int8_t reply_descript_type;
1545 u_int16_t smid, num_completed;
1546 u_int8_t status, extStatus;
1547 union desc_value desc_val;
1548 PLD_LOAD_BALANCE_INFO lbinfo;
1549 u_int32_t device_id;
1550 int threshold_reply_count = 0;
1551
1552
1553 /* If we have a hardware error, not need to continue */
1554 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1555 return (DONE);
1556
1557 desc = sc->reply_desc_mem;
1558 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
1559 + sc->last_reply_idx[MSIxIndex];
1560
1561 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1562
1563 desc_val.word = desc->Words;
1564 num_completed = 0;
1565
1566 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1567
1568 /* Find our reply descriptor for the command and process */
1569 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
1570 smid = reply_desc->SMID;
1571 cmd_mpt = sc->mpt_cmd_list[smid - 1];
1572 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;
1573
1574 status = scsi_io_req->RaidContext.status;
1575 extStatus = scsi_io_req->RaidContext.exStatus;
1576
1577 switch (scsi_io_req->Function) {
1578 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */
1579 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1580 lbinfo = &sc->load_balance_info[device_id];
1581 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1582 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
1583 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1584 }
1585 /* Fall thru and complete IO */
1586 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1587 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1588 mrsas_cmd_done(sc, cmd_mpt);
1589 scsi_io_req->RaidContext.status = 0;
1590 scsi_io_req->RaidContext.exStatus = 0;
1591 mrsas_atomic_dec(&sc->fw_outstanding);
1592 break;
1593 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */
1594 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1595 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1596 cmd_mpt->flags = 0;
1597 mrsas_release_mpt_cmd(cmd_mpt);
1598 break;
1599 }
1600
1601 sc->last_reply_idx[MSIxIndex]++;
1602 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
1603 sc->last_reply_idx[MSIxIndex] = 0;
1604
1605 desc->Words = ~((uint64_t)0x00); /* set it back to all
1606 * 0xFFFFFFFFs */
1607 num_completed++;
1608 threshold_reply_count++;
1609
1610 /* Get the next reply descriptor */
1611 if (!sc->last_reply_idx[MSIxIndex]) {
1612 desc = sc->reply_desc_mem;
1613 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
1614 } else
1615 desc++;
1616
1617 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
1618 desc_val.word = desc->Words;
1619
1620 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1621
1622 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1623 break;
1624
1625 /*
1626 * Write to reply post index after completing threshold reply
1627 * count and still there are more replies in reply queue
1628 * pending to be completed.
1629 */
1630 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1631 if (sc->msix_enable) {
1632 if ((sc->device_id == MRSAS_INVADER) ||
1631 (sc->device_id == MRSAS_FURY))
1633 (sc->device_id == MRSAS_FURY) ||
1634 (sc->device_id == MRSAS_INTRUDER) ||
1635 (sc->device_id == MRSAS_INTRUDER_24))
1636 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1637 ((MSIxIndex & 0x7) << 24) |
1638 sc->last_reply_idx[MSIxIndex]);
1639 else
1640 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1641 sc->last_reply_idx[MSIxIndex]);
1642 } else
1643 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1644 reply_post_host_index), sc->last_reply_idx[0]);
1645
1646 threshold_reply_count = 0;
1647 }
1648 }
1649
1650 /* No match, just return */
1651 if (num_completed == 0)
1652 return (DONE);
1653
1654 /* Clear response interrupt */
1655 if (sc->msix_enable) {
1656 if ((sc->device_id == MRSAS_INVADER) ||
1653 (sc->device_id == MRSAS_FURY)) {
1657 (sc->device_id == MRSAS_FURY) ||
1658 (sc->device_id == MRSAS_INTRUDER) ||
1659 (sc->device_id == MRSAS_INTRUDER_24)) {
1660 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
1661 ((MSIxIndex & 0x7) << 24) |
1662 sc->last_reply_idx[MSIxIndex]);
1663 } else
1664 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
1665 sc->last_reply_idx[MSIxIndex]);
1666 } else
1667 mrsas_write_reg(sc, offsetof(mrsas_reg_set,
1668 reply_post_host_index), sc->last_reply_idx[0]);
1669
1670 return (0);
1671}
1672
1673/*
1674 * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
1675 * input: Adapter instance soft state
1676 *
1677 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1678 * It checks the command status and maps the appropriate CAM status for the
1679 * CCB.
1680 */
1681void
1682mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1683{
1684 struct mrsas_softc *sc = cmd->sc;
1685 u_int8_t *sense_data;
1686
1687 switch (status) {
1688 case MFI_STAT_OK:
1689 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1690 break;
1691 case MFI_STAT_SCSI_IO_FAILED:
1692 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1693 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1694 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1695 if (sense_data) {
1696 /* For now just copy 18 bytes back */
1697 memcpy(sense_data, cmd->sense, 18);
1698 cmd->ccb_ptr->csio.sense_len = 18;
1699 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1700 }
1701 break;
1702 case MFI_STAT_LD_OFFLINE:
1703 case MFI_STAT_DEVICE_NOT_FOUND:
1704 if (cmd->ccb_ptr->ccb_h.target_lun)
1705 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1706 else
1707 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1708 break;
1709 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1710 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1711 break;
1712 default:
1713 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1714 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1715 cmd->ccb_ptr->csio.scsi_status = status;
1716 }
1717 return;
1718}
1719
/*
 * mrsas_alloc_mem: Allocate DMAable memory
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. DMA
 * tag describes constraints of DMA mapping. Memory allocated is mapped into
 * Kernel virtual address. Callback argument is physical memory address.
 *
 * Returns 0 on success, ENOMEM on any failure.  Buffers allocated before a
 * failure are NOT unwound here; the caller's error path invokes
 * mrsas_free_mem(), which skips fields still NULL.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag: full 64-bit address range, up to
	 * max_num_sge segments of at most MAXPHYS each.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,	/* alignment */
	    0,	/* boundary */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,	/* filter, filterarg */
	    MAXPHYS,	/* maxsize */
	    sc->max_num_sge,	/* nsegments */
	    MAXPHYS,	/* maxsegsize */
	    0,	/* flags */
	    NULL, NULL,	/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 * (single segment, restricted to the lower 4GB by the 32-bit lowaddr)
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned)
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (4-byte aligned)
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array (one slice per MSI-X queue).
	 * NOTE(review): not zeroed here — mrsas_complete_cmd() relies on
	 * all-0xFF sentinel words, presumably initialized elsewhere before
	 * use; confirm against the FW init path.
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Full 64-bit address range; maps are
	 * serialized through io_lock via busdma_lock_mutex.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAXPHYS,
	    sc->max_num_sge,	/* nsegments */
	    MAXPHYS,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
1954
1955/*
1956 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1957 * input: callback argument, machine dependent type
1958 * that describes DMA segments, number of segments, error code
1959 *
1960 * This function is for the driver to receive mapping information resultant of
1961 * the bus_dmamap_load(). The information is actually not being used, but the
1962 * address is saved anyway.
1963 */
1964void
1965mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1966{
1967 bus_addr_t *addr;
1968
1969 addr = arg;
1970 *addr = segs[0].ds_addr;
1971}
1972
1973/*
1974 * mrsas_setup_raidmap: Set up RAID map.
1975 * input: Adapter instance soft state
1976 *
1977 * Allocate DMA memory for the RAID maps and perform setup.
1978 */
1979static int
1980mrsas_setup_raidmap(struct mrsas_softc *sc)
1981{
1982 int i;
1983
1984 for (i = 0; i < 2; i++) {
1985 sc->ld_drv_map[i] =
1986 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1987 /* Do Error handling */
1988 if (!sc->ld_drv_map[i]) {
1989 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1990
1991 if (i == 1)
1992 free(sc->ld_drv_map[0], M_MRSAS);
1993 /* ABORT driver initialization */
1994 goto ABORT;
1995 }
1996 }
1997
1998 for (int i = 0; i < 2; i++) {
1999 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2000 4, 0,
2001 BUS_SPACE_MAXADDR_32BIT,
2002 BUS_SPACE_MAXADDR,
2003 NULL, NULL,
2004 sc->max_map_sz,
2005 1,
2006 sc->max_map_sz,
2007 BUS_DMA_ALLOCNOW,
2008 NULL, NULL,
2009 &sc->raidmap_tag[i])) {
2010 device_printf(sc->mrsas_dev,
2011 "Cannot allocate raid map tag.\n");
2012 return (ENOMEM);
2013 }
2014 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2015 (void **)&sc->raidmap_mem[i],
2016 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2017 device_printf(sc->mrsas_dev,
2018 "Cannot allocate raidmap memory.\n");
2019 return (ENOMEM);
2020 }
2021 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2022
2023 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2024 sc->raidmap_mem[i], sc->max_map_sz,
2025 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2026 BUS_DMA_NOWAIT)) {
2027 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2028 return (ENOMEM);
2029 }
2030 if (!sc->raidmap_mem[i]) {
2031 device_printf(sc->mrsas_dev,
2032 "Cannot allocate memory for raid map.\n");
2033 return (ENOMEM);
2034 }
2035 }
2036
2037 if (!mrsas_get_map_info(sc))
2038 mrsas_sync_map_info(sc);
2039
2040 return (0);
2041
2042ABORT:
2043 return (1);
2044}
2045
2046/**
2047 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2048 * @sc: Adapter soft state
2049 *
2050 * Return 0 on success.
2051 */
2052void
2053megasas_setup_jbod_map(struct mrsas_softc *sc)
2054{
2055 int i;
2056 uint32_t pd_seq_map_sz;
2057
2058 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
2059 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
2060
2061 if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
2062 sc->use_seqnum_jbod_fp = 0;
2063 return;
2064 }
2065 if (sc->jbodmap_mem[0])
2066 goto skip_alloc;
2067
2068 for (i = 0; i < 2; i++) {
2069 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2070 4, 0,
2071 BUS_SPACE_MAXADDR_32BIT,
2072 BUS_SPACE_MAXADDR,
2073 NULL, NULL,
2074 pd_seq_map_sz,
2075 1,
2076 pd_seq_map_sz,
2077 BUS_DMA_ALLOCNOW,
2078 NULL, NULL,
2079 &sc->jbodmap_tag[i])) {
2080 device_printf(sc->mrsas_dev,
2081 "Cannot allocate jbod map tag.\n");
2082 return;
2083 }
2084 if (bus_dmamem_alloc(sc->jbodmap_tag[i],
2085 (void **)&sc->jbodmap_mem[i],
2086 BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
2087 device_printf(sc->mrsas_dev,
2088 "Cannot allocate jbod map memory.\n");
2089 return;
2090 }
2091 bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
2092
2093 if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
2094 sc->jbodmap_mem[i], pd_seq_map_sz,
2095 mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
2096 BUS_DMA_NOWAIT)) {
2097 device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
2098 return;
2099 }
2100 if (!sc->jbodmap_mem[i]) {
2101 device_printf(sc->mrsas_dev,
2102 "Cannot allocate memory for jbod map.\n");
2103 sc->use_seqnum_jbod_fp = 0;
2104 return;
2105 }
2106 }
2107
2108skip_alloc:
2109 if (!megasas_sync_pd_seq_num(sc, false) &&
2110 !megasas_sync_pd_seq_num(sc, true))
2111 sc->use_seqnum_jbod_fp = 1;
2112 else
2113 sc->use_seqnum_jbod_fp = 0;
2114
2115 device_printf(sc->mrsas_dev, "Jbod map is supported\n");
2116}
2117
2118/*
2119 * mrsas_init_fw: Initialize Firmware
2120 * input: Adapter soft state
2121 *
2122 * Calls transition_to_ready() to make sure Firmware is in operational state and
2123 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2124 * issues internal commands to get the controller info after the IOC_INIT
2125 * command response is received by Firmware. Note: code relating to
2126 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2127 * is left here as placeholder.
2128 */
2129static int
2130mrsas_init_fw(struct mrsas_softc *sc)
2131{
2132
2133 int ret, loop, ocr = 0;
2134 u_int32_t max_sectors_1;
2135 u_int32_t max_sectors_2;
2136 u_int32_t tmp_sectors;
2137 u_int32_t scratch_pad_2;
2138 int msix_enable = 0;
2139 int fw_msix_count = 0;
2140
2141 /* Make sure Firmware is ready */
2142 ret = mrsas_transition_to_ready(sc, ocr);
2143 if (ret != SUCCESS) {
2144 return (ret);
2145 }
2146 /* MSI-x index 0- reply post host index register */
2147 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2148 /* Check if MSI-X is supported while in ready state */
2149 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2150
2151 if (msix_enable) {
2152 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2153 outbound_scratch_pad_2));
2154
2155 /* Check max MSI-X vectors */
2156 if (sc->device_id == MRSAS_TBOLT) {
2157 sc->msix_vectors = (scratch_pad_2
2158 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2159 fw_msix_count = sc->msix_vectors;
2160 } else {
2161 /* Invader/Fury supports 96 MSI-X vectors */
2162 sc->msix_vectors = ((scratch_pad_2
2163 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2164 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2165 fw_msix_count = sc->msix_vectors;
2166
2167 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2168 loop++) {
2169 sc->msix_reg_offset[loop] =
2170 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2171 (loop * 0x10);
2172 }
2173 }
2174
2175 /* Don't bother allocating more MSI-X vectors than cpus */
2176 sc->msix_vectors = min(sc->msix_vectors,
2177 mp_ncpus);
2178
2179 /* Allocate MSI-x vectors */
2180 if (mrsas_allocate_msix(sc) == SUCCESS)
2181 sc->msix_enable = 1;
2182 else
2183 sc->msix_enable = 0;
2184
2185 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2186 "Online CPU %d Current MSIX <%d>\n",
2187 fw_msix_count, mp_ncpus, sc->msix_vectors);
2188 }
2189 if (mrsas_init_adapter(sc) != SUCCESS) {
2190 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2191 return (1);
2192 }
2193 /* Allocate internal commands for pass-thru */
2194 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2195 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2196 return (1);
2197 }
2198 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2199 if (!sc->ctrl_info) {
2200 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2201 return (1);
2202 }
2203 /*
2204 * Get the controller info from FW, so that the MAX VD support
2205 * availability can be decided.
2206 */
2207 if (mrsas_get_ctrl_info(sc)) {
2208 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2209 return (1);
2210 }
2211 sc->secure_jbod_support =
2212 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2213
2214 if (sc->secure_jbod_support)
2215 device_printf(sc->mrsas_dev, "FW supports SED \n");
2216
2217 if (sc->use_seqnum_jbod_fp)
2218 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2219
2220 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2221 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2222 "There seems to be some problem in the controller\n"
2223 "Please contact to the SUPPORT TEAM if the problem persists\n");
2224 }
2225 megasas_setup_jbod_map(sc);
2226
2227 /* For pass-thru, get PD/LD list and controller info */
2228 memset(sc->pd_list, 0,
2229 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2230 if (mrsas_get_pd_list(sc) != SUCCESS) {
2231 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2232 return (1);
2233 }
2234 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2235 if (mrsas_get_ld_list(sc) != SUCCESS) {
2236 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2237 return (1);
2238 }
2239 /*
2240 * Compute the max allowed sectors per IO: The controller info has
2241 * two limits on max sectors. Driver should use the minimum of these
2242 * two.
2243 *
2244 * 1 << stripe_sz_ops.min = max sectors per strip
2245 *
2246 * Note that older firmwares ( < FW ver 30) didn't report information to
2247 * calculate max_sectors_1. So the number ended up as zero always.
2248 */
2249 tmp_sectors = 0;
2250 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2251 sc->ctrl_info->max_strips_per_io;
2252 max_sectors_2 = sc->ctrl_info->max_request_size;
2253 tmp_sectors = min(max_sectors_1, max_sectors_2);
2254 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2255
2256 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2257 sc->max_sectors_per_req = tmp_sectors;
2258
2259 sc->disableOnlineCtrlReset =
2260 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2261 sc->UnevenSpanSupport =
2262 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2263 if (sc->UnevenSpanSupport) {
2264 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2265 sc->UnevenSpanSupport);
2266
2267 if (MR_ValidateMapInfo(sc))
2268 sc->fast_path_io = 1;
2269 else
2270 sc->fast_path_io = 0;
2271 }
2272 return (0);
2273}
2274
/*
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for issuing the IOC Init command to FW.  The FW status register
 * is read to determine the number of commands supported (max_fw_cmds); all
 * queue/frame allocation sizes are derived from it.  Finally allocates the
 * command memory pools and sends IOC_INIT via mrsas_ioc_init().
 * Returns 0 (SUCCESS) or a non-zero error from the allocation/init steps.
 */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd, scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.  Reply queue depth is
	 * (max_cmd + 1) rounded up to a multiple of 16, then doubled.
	 */
	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	/* One extra IO frame at the front is reserved (see +1 below). */
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));
	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware supports an extended IO chain frame which is 4 times
	 * larger than legacy Firmware.  Legacy Firmware - frame size is
	 * (8 * 128) = 1K; 1M-IO Firmware - frame size is (8 * 128 * 4) = 4K.
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
	/* SGEs that fit in the main message frame after the request header. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2: one SGE is consumed by the chain element on each side. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
	    sc->max_num_sge, sc->max_chain_frame_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply index per MSI-X vector (at least one queue). */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
2359
2360/*
2361 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2362 * input: Adapter soft state
2363 *
2364 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2365 */
2366int
2367mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2368{
2369 int ioc_init_size;
2370
2371 /* Allocate IOC INIT command */
2372 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2373 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2374 1, 0,
2375 BUS_SPACE_MAXADDR_32BIT,
2376 BUS_SPACE_MAXADDR,
2377 NULL, NULL,
2378 ioc_init_size,
2379 1,
2380 ioc_init_size,
2381 BUS_DMA_ALLOCNOW,
2382 NULL, NULL,
2383 &sc->ioc_init_tag)) {
2384 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2385 return (ENOMEM);
2386 }
2387 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2388 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2389 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2390 return (ENOMEM);
2391 }
2392 bzero(sc->ioc_init_mem, ioc_init_size);
2393 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2394 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2395 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2396 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2397 return (ENOMEM);
2398 }
2399 return (0);
2400}
2401
/*
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd: unloads the DMA map, frees the
 * DMA buffer, and destroys the tag, in reverse order of allocation.  Each
 * step is guarded so the function is safe after a partial allocation.
 * (The old header comment incorrectly said "Allocates".)
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	/* phys addr is only set once bus_dmamap_load() succeeded */
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}
2418
2419/*
2420 * mrsas_ioc_init: Sends IOC Init command to FW
2421 * input: Adapter soft state
2422 *
2423 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2424 */
2425int
2426mrsas_ioc_init(struct mrsas_softc *sc)
2427{
2428 struct mrsas_init_frame *init_frame;
2429 pMpi2IOCInitRequest_t IOCInitMsg;
2430 MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
2431 u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
2432 bus_addr_t phys_addr;
2433 int i, retcode = 0;
2434
2435 /* Allocate memory for the IOC INIT command */
2436 if (mrsas_alloc_ioc_cmd(sc)) {
2437 device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
2438 return (1);
2439 }
2440 IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
2441 IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
2442 IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2443 IOCInitMsg->MsgVersion = MPI2_VERSION;
2444 IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
2445 IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
2446 IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
2447 IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
2448 IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
2449 IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
2450
2451 init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
2452 init_frame->cmd = MFI_CMD_INIT;
2453 init_frame->cmd_status = 0xFF;
2454 init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2455
2456 /* driver support Extended MSIX */
2457 if ((sc->device_id == MRSAS_INVADER) ||
2452 (sc->device_id == MRSAS_FURY)) {
2458 (sc->device_id == MRSAS_FURY) ||
2459 (sc->device_id == MRSAS_INTRUDER) ||
2460 (sc->device_id == MRSAS_INTRUDER_24)) {
2461 init_frame->driver_operations.
2462 mfi_capabilities.support_additional_msix = 1;
2463 }
2464 if (sc->verbuf_mem) {
2465 snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
2466 MRSAS_VERSION);
2467 init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
2468 init_frame->driver_ver_hi = 0;
2469 }
2470 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
2471 init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
2472 init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
2473 if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
2474 init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
2475 phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
2476 init_frame->queue_info_new_phys_addr_lo = phys_addr;
2477 init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
2478
2479 req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
2480 req_desc.MFAIo.RequestFlags =
2481 (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2482
2483 mrsas_disable_intr(sc);
2484 mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
2485 mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
2486
2487 /*
2488 * Poll response timer to wait for Firmware response. While this
2489 * timer with the DELAY call could block CPU, the time interval for
2490 * this is only 1 millisecond.
2491 */
2492 if (init_frame->cmd_status == 0xFF) {
2493 for (i = 0; i < (max_wait * 1000); i++) {
2494 if (init_frame->cmd_status == 0xFF)
2495 DELAY(1000);
2496 else
2497 break;
2498 }
2499 }
2500 if (init_frame->cmd_status == 0)
2501 mrsas_dprint(sc, MRSAS_OCR,
2502 "IOC INIT response received from FW.\n");
2503 else {
2504 if (init_frame->cmd_status == 0xFF)
2505 device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
2506 else
2507 device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
2508 retcode = 1;
2509 }
2510
2511 mrsas_free_ioc_cmd(sc);
2512 return (retcode);
2513}
2514
2515/*
2516 * mrsas_alloc_mpt_cmds: Allocates the command packets
2517 * input: Adapter instance soft state
2518 *
2519 * This function allocates the internal commands for IOs. Each command that is
2520 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2521 * array is allocated with mrsas_mpt_cmd context. The free commands are
2522 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2523 * max_fw_cmds.
2524 */
2525int
2526mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2527{
2528 int i, j;
2529 u_int32_t max_cmd, count;
2530 struct mrsas_mpt_cmd *cmd;
2531 pMpi2ReplyDescriptorsUnion_t reply_desc;
2532 u_int32_t offset, chain_offset, sense_offset;
2533 bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2534 u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2535
2536 max_cmd = sc->max_fw_cmds;
2537
2538 sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2539 if (!sc->req_desc) {
2540 device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2541 return (ENOMEM);
2542 }
2543 memset(sc->req_desc, 0, sc->request_alloc_sz);
2544
2545 /*
2546 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2547 * Allocate the dynamic array first and then allocate individual
2548 * commands.
2549 */
2550 sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2551 if (!sc->mpt_cmd_list) {
2552 device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2553 return (ENOMEM);
2554 }
2555 memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2556 for (i = 0; i < max_cmd; i++) {
2557 sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2558 M_MRSAS, M_NOWAIT);
2559 if (!sc->mpt_cmd_list[i]) {
2560 for (j = 0; j < i; j++)
2561 free(sc->mpt_cmd_list[j], M_MRSAS);
2562 free(sc->mpt_cmd_list, M_MRSAS);
2563 sc->mpt_cmd_list = NULL;
2564 return (ENOMEM);
2565 }
2566 }
2567
2568 io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2569 io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2570 chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2571 chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2572 sense_base = (u_int8_t *)sc->sense_mem;
2573 sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2574 for (i = 0; i < max_cmd; i++) {
2575 cmd = sc->mpt_cmd_list[i];
2576 offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2577 chain_offset = sc->max_chain_frame_sz * i;
2578 sense_offset = MRSAS_SENSE_LEN * i;
2579 memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2580 cmd->index = i + 1;
2581 cmd->ccb_ptr = NULL;
2582 callout_init(&cmd->cm_callout, 0);
2583 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2584 cmd->sc = sc;
2585 cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2586 memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2587 cmd->io_request_phys_addr = io_req_base_phys + offset;
2588 cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2589 cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2590 cmd->sense = sense_base + sense_offset;
2591 cmd->sense_phys_addr = sense_base_phys + sense_offset;
2592 if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2593 return (FAIL);
2594 }
2595 TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2596 }
2597
2598 /* Initialize reply descriptor array to 0xFFFFFFFF */
2599 reply_desc = sc->reply_desc_mem;
2600 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2601 for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2602 reply_desc->Words = MRSAS_ULONG_MAX;
2603 }
2604 return (0);
2605}
2606
2607/*
2608 * mrsas_fire_cmd: Sends command to FW
2609 * input: Adapter softstate
2610 * request descriptor address low
2611 * request descriptor address high
2612 *
2613 * This functions fires the command to Firmware by writing to the
2614 * inbound_low_queue_port and inbound_high_queue_port.
2615 */
2616void
2617mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2618 u_int32_t req_desc_hi)
2619{
2620 mtx_lock(&sc->pci_lock);
2621 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2622 req_desc_lo);
2623 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2624 req_desc_hi);
2625 mtx_unlock(&sc->pci_lock);
2626}
2627
2628/*
2629 * mrsas_transition_to_ready: Move FW to Ready state input:
2630 * Adapter instance soft state
2631 *
2632 * During the initialization, FW passes can potentially be in any one of several
2633 * possible states. If the FW in operational, waiting-for-handshake states,
2634 * driver must take steps to bring it to ready state. Otherwise, it has to
2635 * wait for the ready state.
2636 */
2637int
2638mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2639{
2640 int i;
2641 u_int8_t max_wait;
2642 u_int32_t val, fw_state;
2643 u_int32_t cur_state;
2644 u_int32_t abs_state, curr_abs_state;
2645
2646 val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2647 fw_state = val & MFI_STATE_MASK;
2648 max_wait = MRSAS_RESET_WAIT_TIME;
2649
2650 if (fw_state != MFI_STATE_READY)
2651 device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2652
2653 while (fw_state != MFI_STATE_READY) {
2654 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2655 switch (fw_state) {
2656 case MFI_STATE_FAULT:
2657 device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2658 if (ocr) {
2659 cur_state = MFI_STATE_FAULT;
2660 break;
2661 } else
2662 return -ENODEV;
2663 case MFI_STATE_WAIT_HANDSHAKE:
2664 /* Set the CLR bit in inbound doorbell */
2665 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2666 MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
2667 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2668 break;
2669 case MFI_STATE_BOOT_MESSAGE_PENDING:
2670 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2671 MFI_INIT_HOTPLUG);
2672 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2673 break;
2674 case MFI_STATE_OPERATIONAL:
2675 /*
2676 * Bring it to READY state; assuming max wait 10
2677 * secs
2678 */
2679 mrsas_disable_intr(sc);
2680 mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2681 for (i = 0; i < max_wait * 1000; i++) {
2682 if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2683 DELAY(1000);
2684 else
2685 break;
2686 }
2687 cur_state = MFI_STATE_OPERATIONAL;
2688 break;
2689 case MFI_STATE_UNDEFINED:
2690 /*
2691 * This state should not last for more than 2
2692 * seconds
2693 */
2694 cur_state = MFI_STATE_UNDEFINED;
2695 break;
2696 case MFI_STATE_BB_INIT:
2697 cur_state = MFI_STATE_BB_INIT;
2698 break;
2699 case MFI_STATE_FW_INIT:
2700 cur_state = MFI_STATE_FW_INIT;
2701 break;
2702 case MFI_STATE_FW_INIT_2:
2703 cur_state = MFI_STATE_FW_INIT_2;
2704 break;
2705 case MFI_STATE_DEVICE_SCAN:
2706 cur_state = MFI_STATE_DEVICE_SCAN;
2707 break;
2708 case MFI_STATE_FLUSH_CACHE:
2709 cur_state = MFI_STATE_FLUSH_CACHE;
2710 break;
2711 default:
2712 device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2713 return -ENODEV;
2714 }
2715
2716 /*
2717 * The cur_state should not last for more than max_wait secs
2718 */
2719 for (i = 0; i < (max_wait * 1000); i++) {
2720 fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2721 outbound_scratch_pad)) & MFI_STATE_MASK);
2722 curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2723 outbound_scratch_pad));
2724 if (abs_state == curr_abs_state)
2725 DELAY(1000);
2726 else
2727 break;
2728 }
2729
2730 /*
2731 * Return error if fw_state hasn't changed after max_wait
2732 */
2733 if (curr_abs_state == abs_state) {
2734 device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2735 "in %d secs\n", fw_state, max_wait);
2736 return -ENODEV;
2737 }
2738 }
2739 mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2740 return 0;
2741}
2742
2743/*
2744 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2745 * input: Adapter soft state
2746 *
2747 * This function removes an MFI command from the command list.
2748 */
2749struct mrsas_mfi_cmd *
2750mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2751{
2752 struct mrsas_mfi_cmd *cmd = NULL;
2753
2754 mtx_lock(&sc->mfi_cmd_pool_lock);
2755 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2756 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2757 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2758 }
2759 mtx_unlock(&sc->mfi_cmd_pool_lock);
2760
2761 return cmd;
2762}
2763
2764/*
2765 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2766 * input: Adapter Context.
2767 *
2768 * This function will check FW status register and flag do_timeout_reset flag.
2769 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2770 * trigger reset.
2771 */
2772static void
2773mrsas_ocr_thread(void *arg)
2774{
2775 struct mrsas_softc *sc;
2776 u_int32_t fw_status, fw_state;
2777
2778 sc = (struct mrsas_softc *)arg;
2779
2780 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2781
2782 sc->ocr_thread_active = 1;
2783 mtx_lock(&sc->sim_lock);
2784 for (;;) {
2785 /* Sleep for 1 second and check the queue status */
2786 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2787 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2788 if (sc->remove_in_progress ||
2789 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2790 mrsas_dprint(sc, MRSAS_OCR,
2791 "Exit due to %s from %s\n",
2792 sc->remove_in_progress ? "Shutdown" :
2793 "Hardware critical error", __func__);
2794 break;
2795 }
2796 fw_status = mrsas_read_reg(sc,
2797 offsetof(mrsas_reg_set, outbound_scratch_pad));
2798 fw_state = fw_status & MFI_STATE_MASK;
2799 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2800 device_printf(sc->mrsas_dev, "%s started due to %s!\n",
2801 sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
2802 sc->do_timedout_reset ? "IO Timeout" :
2803 "FW fault detected");
2804 mtx_lock_spin(&sc->ioctl_lock);
2805 sc->reset_in_progress = 1;
2806 sc->reset_count++;
2807 mtx_unlock_spin(&sc->ioctl_lock);
2808 mrsas_xpt_freeze(sc);
2809 mrsas_reset_ctrl(sc, sc->do_timedout_reset);
2810 mrsas_xpt_release(sc);
2811 sc->reset_in_progress = 0;
2812 sc->do_timedout_reset = 0;
2813 }
2814 }
2815 mtx_unlock(&sc->sim_lock);
2816 sc->ocr_thread_active = 0;
2817 mrsas_kproc_exit(0);
2818}
2819
2820/*
2821 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2822 * input: Adapter Context.
2823 *
2824 * This function will clear reply descriptor so that post OCR driver and FW will
2825 * lost old history.
2826 */
2827void
2828mrsas_reset_reply_desc(struct mrsas_softc *sc)
2829{
2830 int i, count;
2831 pMpi2ReplyDescriptorsUnion_t reply_desc;
2832
2833 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2834 for (i = 0; i < count; i++)
2835 sc->last_reply_idx[i] = 0;
2836
2837 reply_desc = sc->reply_desc_mem;
2838 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2839 reply_desc->Words = MRSAS_ULONG_MAX;
2840 }
2841}
2842
2843/*
2844 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2845 * input: Adapter Context.
2846 *
2847 * This function will run from thread context so that it can sleep. 1. Do not
2848 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2849 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2850 * command Controller is in working state, so skip OCR. Otherwise, do
2851 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2852 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2853 * OCR, Re-fire Management command and move Controller to Operation state.
2854 */
2855int
2856mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2857{
2858 int retval = SUCCESS, i, j, retry = 0;
2859 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2860 union ccb *ccb;
2861 struct mrsas_mfi_cmd *mfi_cmd;
2862 struct mrsas_mpt_cmd *mpt_cmd;
2863 union mrsas_evt_class_locale class_locale;
2864
2865 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2866 device_printf(sc->mrsas_dev,
2867 "mrsas: Hardware critical error, returning FAIL.\n");
2868 return FAIL;
2869 }
2870 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2871 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2872 mrsas_disable_intr(sc);
2873 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2874 sc->mrsas_fw_fault_check_delay * hz);
2875
2876 /* First try waiting for commands to complete */
2877 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2878 mrsas_dprint(sc, MRSAS_OCR,
2879 "resetting adapter from %s.\n",
2880 __func__);
2881 /* Now return commands back to the CAM layer */
2882 mtx_unlock(&sc->sim_lock);
2883 for (i = 0; i < sc->max_fw_cmds; i++) {
2884 mpt_cmd = sc->mpt_cmd_list[i];
2885 if (mpt_cmd->ccb_ptr) {
2886 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2887 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2888 mrsas_cmd_done(sc, mpt_cmd);
2889 mrsas_atomic_dec(&sc->fw_outstanding);
2890 }
2891 }
2892 mtx_lock(&sc->sim_lock);
2893
2894 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2895 outbound_scratch_pad));
2896 abs_state = status_reg & MFI_STATE_MASK;
2897 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2898 if (sc->disableOnlineCtrlReset ||
2899 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2900 /* Reset not supported, kill adapter */
2901 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2902 mrsas_kill_hba(sc);
2903 retval = FAIL;
2904 goto out;
2905 }
2906 /* Now try to reset the chip */
2907 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2908 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2909 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2910 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2911 MPI2_WRSEQ_1ST_KEY_VALUE);
2912 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2913 MPI2_WRSEQ_2ND_KEY_VALUE);
2914 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2915 MPI2_WRSEQ_3RD_KEY_VALUE);
2916 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2917 MPI2_WRSEQ_4TH_KEY_VALUE);
2918 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2919 MPI2_WRSEQ_5TH_KEY_VALUE);
2920 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2921 MPI2_WRSEQ_6TH_KEY_VALUE);
2922
2923 /* Check that the diag write enable (DRWE) bit is on */
2924 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2925 fusion_host_diag));
2926 retry = 0;
2927 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2928 DELAY(100 * 1000);
2929 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2930 fusion_host_diag));
2931 if (retry++ == 100) {
2932 mrsas_dprint(sc, MRSAS_OCR,
2933 "Host diag unlock failed!\n");
2934 break;
2935 }
2936 }
2937 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2938 continue;
2939
2940 /* Send chip reset command */
2941 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2942 host_diag | HOST_DIAG_RESET_ADAPTER);
2943 DELAY(3000 * 1000);
2944
2945 /* Make sure reset adapter bit is cleared */
2946 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2947 fusion_host_diag));
2948 retry = 0;
2949 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2950 DELAY(100 * 1000);
2951 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2952 fusion_host_diag));
2953 if (retry++ == 1000) {
2954 mrsas_dprint(sc, MRSAS_OCR,
2955 "Diag reset adapter never cleared!\n");
2956 break;
2957 }
2958 }
2959 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2960 continue;
2961
2962 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2963 outbound_scratch_pad)) & MFI_STATE_MASK;
2964 retry = 0;
2965
2966 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2967 DELAY(100 * 1000);
2968 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2969 outbound_scratch_pad)) & MFI_STATE_MASK;
2970 }
2971 if (abs_state <= MFI_STATE_FW_INIT) {
2972 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2973 " state = 0x%x\n", abs_state);
2974 continue;
2975 }
2976 /* Wait for FW to become ready */
2977 if (mrsas_transition_to_ready(sc, 1)) {
2978 mrsas_dprint(sc, MRSAS_OCR,
2979 "mrsas: Failed to transition controller to ready.\n");
2980 continue;
2981 }
2982 mrsas_reset_reply_desc(sc);
2983 if (mrsas_ioc_init(sc)) {
2984 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2985 continue;
2986 }
2987 for (j = 0; j < sc->max_fw_cmds; j++) {
2988 mpt_cmd = sc->mpt_cmd_list[j];
2989 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2990 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2991 mrsas_release_mfi_cmd(mfi_cmd);
2992 mrsas_release_mpt_cmd(mpt_cmd);
2993 }
2994 }
2995
2996 sc->aen_cmd = NULL;
2997
2998 /* Reset load balance info */
2999 memset(sc->load_balance_info, 0,
3000 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3001
3002 if (mrsas_get_ctrl_info(sc)) {
3003 mrsas_kill_hba(sc);
3004 retval = FAIL;
3005 goto out;
3006 }
3007 if (!mrsas_get_map_info(sc))
3008 mrsas_sync_map_info(sc);
3009
3010 megasas_setup_jbod_map(sc);
3011
3012 memset(sc->pd_list, 0,
3013 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3014 if (mrsas_get_pd_list(sc) != SUCCESS) {
3015 device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
3016 "Will get the latest PD LIST after OCR on event.\n");
3017 }
3018 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
3019 if (mrsas_get_ld_list(sc) != SUCCESS) {
3020 device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
3021 "Will get the latest LD LIST after OCR on event.\n");
3022 }
3023
3024 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3025 mrsas_enable_intr(sc);
3026 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3027
3028 /* Register AEN with FW for last sequence number */
3029 class_locale.members.reserved = 0;
3030 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3031 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3032
3033 if (mrsas_register_aen(sc, sc->last_seq_num,
3034 class_locale.word)) {
3035 device_printf(sc->mrsas_dev,
3036 "ERROR: AEN registration FAILED from OCR !!! "
3037 "Further events from the controller cannot be notified."
3038 "Either there is some problem in the controller"
3039 "or the controller does not support AEN.\n"
3040 "Please contact to the SUPPORT TEAM if the problem persists\n");
3041 }
3042 /* Adapter reset completed successfully */
3043 device_printf(sc->mrsas_dev, "Reset successful\n");
3044 retval = SUCCESS;
3045 goto out;
3046 }
3047 /* Reset failed, kill the adapter */
3048 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3049 mrsas_kill_hba(sc);
3050 retval = FAIL;
3051 } else {
3052 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3053 mrsas_enable_intr(sc);
3054 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3055 }
3056out:
3057 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3058 mrsas_dprint(sc, MRSAS_OCR,
3059 "Reset Exit with %d.\n", retval);
3060 return retval;
3061}
3062
3063/*
3064 * mrsas_kill_hba: Kill HBA when OCR is not supported
3065 * input: Adapter Context.
3066 *
3067 * This function will kill HBA when OCR is not supported.
3068 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so no new work is accepted. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* 1-second settle delay (DELAY() is in microseconds) before stopping. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell the firmware to stop the adapter via the doorbell register. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail back any IOCTLs still waiting on the now-dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
3081
3082/**
3083 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3084 * input: Controller softc
3085 *
3086 * Returns void
3087 */
3088void
3089mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3090{
3091 int i;
3092 struct mrsas_mpt_cmd *cmd_mpt;
3093 struct mrsas_mfi_cmd *cmd_mfi;
3094 u_int32_t count, MSIxIndex;
3095
3096 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3097 for (i = 0; i < sc->max_fw_cmds; i++) {
3098 cmd_mpt = sc->mpt_cmd_list[i];
3099
3100 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3101 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3102 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3103 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3104 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3105 cmd_mpt->io_request->RaidContext.status);
3106 }
3107 }
3108 }
3109}
3110
3111/*
3112 * mrsas_wait_for_outstanding: Wait for outstanding commands
3113 * input: Adapter Context.
3114 *
3115 * This function will wait for 180 seconds for outstanding commands to be
3116 * completed.
3117 */
3118int
3119mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
3120{
3121 int i, outstanding, retval = 0;
3122 u_int32_t fw_state, count, MSIxIndex;
3123
3124
3125 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
3126 if (sc->remove_in_progress) {
3127 mrsas_dprint(sc, MRSAS_OCR,
3128 "Driver remove or shutdown called.\n");
3129 retval = 1;
3130 goto out;
3131 }
3132 /* Check if firmware is in fault state */
3133 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
3134 outbound_scratch_pad)) & MFI_STATE_MASK;
3135 if (fw_state == MFI_STATE_FAULT) {
3136 mrsas_dprint(sc, MRSAS_OCR,
3137 "Found FW in FAULT state, will reset adapter.\n");
3138 retval = 1;
3139 goto out;
3140 }
3141 if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
3142 mrsas_dprint(sc, MRSAS_OCR,
3143 "DCMD IO TIMEOUT detected, will reset adapter.\n");
3144 retval = 1;
3145 goto out;
3146 }
3147 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
3148 if (!outstanding)
3149 goto out;
3150
3151 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
3152 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
3153 "commands to complete\n", i, outstanding);
3154 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3155 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3156 mrsas_complete_cmd(sc, MSIxIndex);
3157 }
3158 DELAY(1000 * 1000);
3159 }
3160
3161 if (mrsas_atomic_read(&sc->fw_outstanding)) {
3162 mrsas_dprint(sc, MRSAS_OCR,
3163 " pending commands remain after waiting,"
3164 " will reset adapter.\n");
3165 retval = 1;
3166 }
3167out:
3168 return retval;
3169}
3170
3171/*
3172 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3173 * input: Command packet for return to free cmd pool
3174 *
3175 * This function returns the MFI command to the command list.
3176 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	/* Scrub per-use state and return the frame to the free pool. */
	mtx_lock(&sc->mfi_cmd_pool_lock);
	cmd->ccb_ptr = NULL;
	cmd->cmd_id.frame_count = 0;
	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3190
3191/*
3192 * mrsas_get_controller_info: Returns FW's controller structure
3193 * input: Adapter soft state
3194 * Controller information structure
3195 *
3196 * Issues an internal command (DCMD) to get the FW's controller structure. This
3197 * information is mainly used to find out the maximum IO transfer per command
3198 * supported by the FW.
3199 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	/* do_ocr stays 1 on the timeout path so the OCR thread is kicked. */
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer that the FW fills with the controller-info structure. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build a single-SGE read DCMD for MR_DCMD_CTRL_GET_INFO. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/*
	 * On timeout the frame may still be owned by the FW, so it is NOT
	 * released here; the subsequent OCR reclaims it. Otherwise the
	 * frame goes back to the pool.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3256
3257/*
3258 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3259 * input:
3260 * sc - Controller's softc
3261*/
3262static void
3263mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3264{
3265 sc->max256vdSupport =
3266 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3267 /* Below is additional check to address future FW enhancement */
3268 if (sc->ctrl_info->max_lds > 64)
3269 sc->max256vdSupport = 1;
3270
3271 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3272 * MRSAS_MAX_DEV_PER_CHANNEL;
3273 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3274 * MRSAS_MAX_DEV_PER_CHANNEL;
3275 if (sc->max256vdSupport) {
3276 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3277 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3278 } else {
3279 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3280 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3281 }
3282
3283 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3284 (sizeof(MR_LD_SPAN_MAP) *
3285 (sc->fw_supported_vd_count - 1));
3286 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3287 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3288 (sizeof(MR_LD_SPAN_MAP) *
3289 (sc->drv_supported_vd_count - 1));
3290
3291 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3292
3293 if (sc->max256vdSupport)
3294 sc->current_map_sz = sc->new_map_sz;
3295 else
3296 sc->current_map_sz = sc->old_map_sz;
3297}
3298
3299/*
3300 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3301 * input: Adapter soft state
3302 *
3303 * Allocates DMAable memory for the controller info internal command.
3304 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	/* Single-segment, 32-bit-addressable DMA tag sized to the struct. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ctlr_info_size,
	    1,
	    ctlr_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	/*
	 * NOTE(review): on the failure paths below the tag (and then the
	 * memory) created above are not torn down here; whether the caller
	 * always follows up with mrsas_free_ctlr_info_cmd() on error is not
	 * visible from this file section — confirm, else this leaks.
	 */
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	/* mrsas_addr_cb stores the single segment's bus address. */
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}
3340
3341/*
3342 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3343 * input: Adapter soft state
3344 *
3345 * Deallocates memory of the get controller info cmd.
3346 */
3347void
3348mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3349{
3350 if (sc->ctlr_info_phys_addr)
3351 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3352 if (sc->ctlr_info_mem != NULL)
3353 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3354 if (sc->ctlr_info_tag != NULL)
3355 bus_dma_tag_destroy(sc->ctlr_info_tag);
3356}
3357
3358/*
3359 * mrsas_issue_polled: Issues a polling command
3360 * inputs: Adapter soft state
3361 * Command packet to be issued
3362 *
3363 * This function is for posting of internal commands to Firmware. MFI requires
3364 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3365 * the poll response timer is 180 seconds.
3366 */
3367int
3368mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3369{
3370 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3371 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3372 int i, retcode = SUCCESS;
3373
3374 frame_hdr->cmd_status = 0xFF;
3375 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3376
3377 /* Issue the frame using inbound queue port */
3378 if (mrsas_issue_dcmd(sc, cmd)) {
3379 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3380 return (1);
3381 }
3382 /*
3383 * Poll response timer to wait for Firmware response. While this
3384 * timer with the DELAY call could block CPU, the time interval for
3385 * this is only 1 millisecond.
3386 */
3387 if (frame_hdr->cmd_status == 0xFF) {
3388 for (i = 0; i < (max_wait * 1000); i++) {
3389 if (frame_hdr->cmd_status == 0xFF)
3390 DELAY(1000);
3391 else
3392 break;
3393 }
3394 }
3395 if (frame_hdr->cmd_status == 0xFF) {
3396 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3397 "seconds from %s\n", max_wait, __func__);
3398 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3399 cmd->frame->dcmd.opcode);
3400 retcode = ETIMEDOUT;
3401 }
3402 return (retcode);
3403}
3404
3405/*
3406 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3407 * input: Adapter soft state mfi cmd pointer
3408 *
3409 * This function is called by mrsas_issued_blocked_cmd() and
3410 * mrsas_issued_polled(), to build the MPT command and then fire the command
3411 * to Firmware.
3412 */
3413int
3414mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3415{
3416 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3417
3418 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3419 if (!req_desc) {
3420 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3421 return (1);
3422 }
3423 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3424
3425 return (0);
3426}
3427
3428/*
3429 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3430 * input: Adapter soft state mfi cmd to build
3431 *
3432 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3433 * command and prepares the MPT command to send to Firmware.
3434 */
3435MRSAS_REQUEST_DESCRIPTOR_UNION *
3436mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3437{
3438 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3439 u_int16_t index;
3440
3441 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3442 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3443 return NULL;
3444 }
3445 index = cmd->cmd_id.context.smid;
3446
3447 req_desc = mrsas_get_request_desc(sc, index - 1);
3448 if (!req_desc)
3449 return NULL;
3450
3451 req_desc->addr.Words = 0;
3452 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3453
3454 req_desc->SCSIIO.SMID = index;
3455
3456 return (req_desc);
3457}
3458
3459/*
3460 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3461 * input: Adapter soft state mfi cmd pointer
3462 *
3463 * The MPT command and the io_request are setup as a passthru command. The SGE
3464 * chain address is set to frame_phys_addr of the MFI command.
3465 */
3466u_int8_t
3467mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
3468{
3469 MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3470 PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
3471 struct mrsas_mpt_cmd *mpt_cmd;
3472 struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
3473
3474 mpt_cmd = mrsas_get_mpt_cmd(sc);
3475 if (!mpt_cmd)
3476 return (1);
3477
3478 /* Save the smid. To be used for returning the cmd */
3479 mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
3480
3481 mpt_cmd->sync_cmd_idx = mfi_cmd->index;
3482
3483 /*
3484 * For cmds where the flag is set, store the flag and check on
3485 * completion. For cmds with this flag, don't call
3486 * mrsas_complete_cmd.
3487 */
3488
3489 if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
3490 mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3491
3492 io_req = mpt_cmd->io_request;
3493
3486 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
3494 if ((sc->device_id == MRSAS_INVADER) ||
3495 (sc->device_id == MRSAS_FURY) ||
3496 (sc->device_id == MRSAS_INTRUDER) ||
3497 (sc->device_id == MRSAS_INTRUDER_24)) {
3498 pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
3499
3500 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
3501 sgl_ptr_end->Flags = 0;
3502 }
3503 mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;
3504
3505 io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3506 io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
3507 io_req->ChainOffset = sc->chain_offset_mfi_pthru;
3508
3509 mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
3510
3511 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3512 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3513
3514 mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
3515
3516 return (0);
3517}
3518
3519/*
3520 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3521 * input: Adapter soft state Command to be issued
3522 *
3523 * This function waits on an event for the command to be returned from the ISR.
3524 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3525 * internal and ioctl commands.
3526 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local 'cmd' variable,
	 * but the sleep/wakeup channel actually used is &sc->chan itself
	 * (see tsleep below and mrsas_wakeup()); the stored value appears
	 * unused as a channel — confirm before changing.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second slices until the ISR flips cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		/* IOCTL (sync_cmd) callers wait indefinitely; internal
		 * commands time out after max_wait seconds. */
		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3571
3572/*
3573 * mrsas_complete_mptmfi_passthru: Completes a command
3574 * input: @sc: Adapter soft state
3575 * @cmd: Command to be completed
3576 * @status: cmd completion status
3577 *
3578 * This function is called from mrsas_complete_cmd() after an interrupt is
3579 * received from Firmware, and io_request->Function is
3580 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3581 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	/* Dispatch on the original MFI command type. */
	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI passthru completes like a DCMD. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Pended map-sync completed: adopt the new map. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* NOT_FOUND: nothing to adopt; just free the cmd. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Fast-path I/O is only allowed with a valid map. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-arm the pended map-sync for the next change. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {

			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3680
3681/*
3682 * mrsas_wakeup: Completes an internal command
3683 * input: Adapter soft state
3684 * Command to be completed
3685 *
3686 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3687 * timer is started. This function is called from
3688 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3689 * from the command wait.
3690 */
3691void
3692mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3693{
3694 cmd->cmd_status = cmd->frame->io.cmd_status;
3695
3696 if (cmd->cmd_status == 0xFF)
3697 cmd->cmd_status = 0;
3698
3699 sc->chan = (void *)&cmd;
3700 wakeup_one((void *)&sc->chan);
3701 return;
3702}
3703
3704/*
3705 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3706 * Adapter soft state Shutdown/Hibernate
3707 *
3708 * This function issues a DCMD internal command to Firmware to initiate shutdown
3709 * of the controller.
3710 */
3711static void
3712mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3713{
3714 struct mrsas_mfi_cmd *cmd;
3715 struct mrsas_dcmd_frame *dcmd;
3716
3717 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3718 return;
3719
3720 cmd = mrsas_get_mfi_cmd(sc);
3721 if (!cmd) {
3722 device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3723 return;
3724 }
3725 if (sc->aen_cmd)
3726 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3727 if (sc->map_update_cmd)
3728 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3729 if (sc->jbod_seq_cmd)
3730 mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
3731
3732 dcmd = &cmd->frame->dcmd;
3733 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3734
3735 dcmd->cmd = MFI_CMD_DCMD;
3736 dcmd->cmd_status = 0x0;
3737 dcmd->sge_count = 0;
3738 dcmd->flags = MFI_FRAME_DIR_NONE;
3739 dcmd->timeout = 0;
3740 dcmd->pad_0 = 0;
3741 dcmd->data_xfer_len = 0;
3742 dcmd->opcode = opcode;
3743
3744 device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3745
3746 mrsas_issue_blocked_cmd(sc, cmd);
3747 mrsas_release_mfi_cmd(cmd);
3748
3749 return;
3750}
3751
3752/*
3753 * mrsas_flush_cache: Requests FW to flush all its caches input:
3754 * Adapter soft state
3755 *
3756 * This function is issues a DCMD internal command to Firmware to initiate
3757 * flushing of all caches.
3758 */
3759static void
3760mrsas_flush_cache(struct mrsas_softc *sc)
3761{
3762 struct mrsas_mfi_cmd *cmd;
3763 struct mrsas_dcmd_frame *dcmd;
3764
3765 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3766 return;
3767
3768 cmd = mrsas_get_mfi_cmd(sc);
3769 if (!cmd) {
3770 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3771 return;
3772 }
3773 dcmd = &cmd->frame->dcmd;
3774 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3775
3776 dcmd->cmd = MFI_CMD_DCMD;
3777 dcmd->cmd_status = 0x0;
3778 dcmd->sge_count = 0;
3779 dcmd->flags = MFI_FRAME_DIR_NONE;
3780 dcmd->timeout = 0;
3781 dcmd->pad_0 = 0;
3782 dcmd->data_xfer_len = 0;
3783 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3784 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3785
3786 mrsas_issue_blocked_cmd(sc, cmd);
3787 mrsas_release_mfi_cmd(cmd);
3788
3789 return;
3790}
3791
3792int
3793megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
3794{
3795 int retcode = 0;
3796 u_int8_t do_ocr = 1;
3797 struct mrsas_mfi_cmd *cmd;
3798 struct mrsas_dcmd_frame *dcmd;
3799 uint32_t pd_seq_map_sz;
3800 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3801 bus_addr_t pd_seq_h;
3802
3803 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
3804 (sizeof(struct MR_PD_CFG_SEQ) *
3805 (MAX_PHYSICAL_DEVICES - 1));
3806
3807 cmd = mrsas_get_mfi_cmd(sc);
3808 if (!cmd) {
3809 device_printf(sc->mrsas_dev,
3810 "Cannot alloc for ld map info cmd.\n");
3811 return 1;
3812 }
3813 dcmd = &cmd->frame->dcmd;
3814
3815 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
3816 pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
3817 if (!pd_sync) {
3818 device_printf(sc->mrsas_dev,
3819 "Failed to alloc mem for jbod map info.\n");
3820 mrsas_release_mfi_cmd(cmd);
3821 return (ENOMEM);
3822 }
3823 memset(pd_sync, 0, pd_seq_map_sz);
3824 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3825 dcmd->cmd = MFI_CMD_DCMD;
3826 dcmd->cmd_status = 0xFF;
3827 dcmd->sge_count = 1;
3828 dcmd->timeout = 0;
3829 dcmd->pad_0 = 0;
3830 dcmd->data_xfer_len = (pd_seq_map_sz);
3831 dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
3832 dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
3833 dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
3834
3835 if (pend) {
3836 dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
3837 dcmd->flags = (MFI_FRAME_DIR_WRITE);
3838 sc->jbod_seq_cmd = cmd;
3839 if (mrsas_issue_dcmd(sc, cmd)) {
3840 device_printf(sc->mrsas_dev,
3841 "Fail to send sync map info command.\n");
3842 return 1;
3843 } else
3844 return 0;
3845 } else
3846 dcmd->flags = MFI_FRAME_DIR_READ;
3847
3848 retcode = mrsas_issue_polled(sc, cmd);
3849 if (retcode == ETIMEDOUT)
3850 goto dcmd_timeout;
3851
3852 if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
3853 device_printf(sc->mrsas_dev,
3854 "driver supports max %d JBOD, but FW reports %d\n",
3855 MAX_PHYSICAL_DEVICES, pd_sync->count);
3856 retcode = -EINVAL;
3857 }
3858 if (!retcode)
3859 sc->pd_seq_map_id++;
3860 do_ocr = 0;
3861
3862dcmd_timeout:
3863 if (do_ocr)
3864 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3865 else
3866 mrsas_release_mfi_cmd(cmd);
3867
3868 return (retcode);
3869}
3870
3871/*
3872 * mrsas_get_map_info: Load and validate RAID map input:
3873 * Adapter instance soft state
3874 *
3875 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3876 * and validate RAID map. It returns 0 if successful, 1 other- wise.
3877 */
3878static int
3879mrsas_get_map_info(struct mrsas_softc *sc)
3880{
3881 uint8_t retcode = 0;
3882
3883 sc->fast_path_io = 0;
3884 if (!mrsas_get_ld_map_info(sc)) {
3885 retcode = MR_ValidateMapInfo(sc);
3886 if (retcode == 0) {
3887 sc->fast_path_io = 1;
3888 return 0;
3889 }
3890 }
3891 return 1;
3892}
3893
3894/*
3895 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3896 * Adapter instance soft state
3897 *
3898 * Issues an internal command (DCMD) to get the FW's controller PD list
3899 * structure.
3900 */
3901static int
3902mrsas_get_ld_map_info(struct mrsas_softc *sc)
3903{
3904 int retcode = 0;
3905 struct mrsas_mfi_cmd *cmd;
3906 struct mrsas_dcmd_frame *dcmd;
3907 void *map;
3908 bus_addr_t map_phys_addr = 0;
3909
3910 cmd = mrsas_get_mfi_cmd(sc);
3911 if (!cmd) {
3912 device_printf(sc->mrsas_dev,
3913 "Cannot alloc for ld map info cmd.\n");
3914 return 1;
3915 }
3916 dcmd = &cmd->frame->dcmd;
3917
3918 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3919 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3920 if (!map) {
3921 device_printf(sc->mrsas_dev,
3922 "Failed to alloc mem for ld map info.\n");
3923 mrsas_release_mfi_cmd(cmd);
3924 return (ENOMEM);
3925 }
3926 memset(map, 0, sizeof(sc->max_map_sz));
3927 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3928
3929 dcmd->cmd = MFI_CMD_DCMD;
3930 dcmd->cmd_status = 0xFF;
3931 dcmd->sge_count = 1;
3932 dcmd->flags = MFI_FRAME_DIR_READ;
3933 dcmd->timeout = 0;
3934 dcmd->pad_0 = 0;
3935 dcmd->data_xfer_len = sc->current_map_sz;
3936 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3937 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3938 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3939
3940 retcode = mrsas_issue_polled(sc, cmd);
3941 if (retcode == ETIMEDOUT)
3942 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3943 else
3944 mrsas_release_mfi_cmd(cmd);
3945
3946 return (retcode);
3947}
3948
3949/*
3950 * mrsas_sync_map_info: Get FW's ld_map structure input:
3951 * Adapter instance soft state
3952 *
3953 * Issues an internal command (DCMD) to get the FW's controller PD list
3954 * structure.
3955 */
3956static int
3957mrsas_sync_map_info(struct mrsas_softc *sc)
3958{
3959 int retcode = 0, i;
3960 struct mrsas_mfi_cmd *cmd;
3961 struct mrsas_dcmd_frame *dcmd;
3962 uint32_t size_sync_info, num_lds;
3963 MR_LD_TARGET_SYNC *target_map = NULL;
3964 MR_DRV_RAID_MAP_ALL *map;
3965 MR_LD_RAID *raid;
3966 MR_LD_TARGET_SYNC *ld_sync;
3967 bus_addr_t map_phys_addr = 0;
3968
3969 cmd = mrsas_get_mfi_cmd(sc);
3970 if (!cmd) {
3971 device_printf(sc->mrsas_dev,
3972 "Cannot alloc for sync map info cmd\n");
3973 return 1;
3974 }
3975 map = sc->ld_drv_map[sc->map_id & 1];
3976 num_lds = map->raidMap.ldCount;
3977
3978 dcmd = &cmd->frame->dcmd;
3979 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3980 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3981
3982 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3983 memset(target_map, 0, sc->max_map_sz);
3984
3985 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3986
3987 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3988
3989 for (i = 0; i < num_lds; i++, ld_sync++) {
3990 raid = MR_LdRaidGet(i, map);
3991 ld_sync->targetId = MR_GetLDTgtId(i, map);
3992 ld_sync->seqNum = raid->seqNum;
3993 }
3994
3995 dcmd->cmd = MFI_CMD_DCMD;
3996 dcmd->cmd_status = 0xFF;
3997 dcmd->sge_count = 1;
3998 dcmd->flags = MFI_FRAME_DIR_WRITE;
3999 dcmd->timeout = 0;
4000 dcmd->pad_0 = 0;
4001 dcmd->data_xfer_len = sc->current_map_sz;
4002 dcmd->mbox.b[0] = num_lds;
4003 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
4004 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
4005 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
4006 dcmd->sgl.sge32[0].length = sc->current_map_sz;
4007
4008 sc->map_update_cmd = cmd;
4009 if (mrsas_issue_dcmd(sc, cmd)) {
4010 device_printf(sc->mrsas_dev,
4011 "Fail to send sync map info command.\n");
4012 return (1);
4013 }
4014 return (retcode);
4015}
4016
4017/*
4018 * mrsas_get_pd_list: Returns FW's PD list structure input:
4019 * Adapter soft state
4020 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out about the system
 * physical drives supported by the firmware.
4024 */
4025static int
4026mrsas_get_pd_list(struct mrsas_softc *sc)
4027{
4028 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4029 u_int8_t do_ocr = 1;
4030 struct mrsas_mfi_cmd *cmd;
4031 struct mrsas_dcmd_frame *dcmd;
4032 struct MR_PD_LIST *pd_list_mem;
4033 struct MR_PD_ADDRESS *pd_addr;
4034 bus_addr_t pd_list_phys_addr = 0;
4035 struct mrsas_tmp_dcmd *tcmd;
4036
4037 cmd = mrsas_get_mfi_cmd(sc);
4038 if (!cmd) {
4039 device_printf(sc->mrsas_dev,
4040 "Cannot alloc for get PD list cmd\n");
4041 return 1;
4042 }
4043 dcmd = &cmd->frame->dcmd;
4044
4045 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4046 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4047 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4048 device_printf(sc->mrsas_dev,
4049 "Cannot alloc dmamap for get PD list cmd\n");
4050 mrsas_release_mfi_cmd(cmd);
4051 mrsas_free_tmp_dcmd(tcmd);
4052 free(tcmd, M_MRSAS);
4053 return (ENOMEM);
4054 } else {
4055 pd_list_mem = tcmd->tmp_dcmd_mem;
4056 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4057 }
4058 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4059
4060 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4061 dcmd->mbox.b[1] = 0;
4062 dcmd->cmd = MFI_CMD_DCMD;
4063 dcmd->cmd_status = 0xFF;
4064 dcmd->sge_count = 1;
4065 dcmd->flags = MFI_FRAME_DIR_READ;
4066 dcmd->timeout = 0;
4067 dcmd->pad_0 = 0;
4068 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4069 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4070 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4071 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4072
4073 retcode = mrsas_issue_polled(sc, cmd);
4074 if (retcode == ETIMEDOUT)
4075 goto dcmd_timeout;
4076
4077 /* Get the instance PD list */
4078 pd_count = MRSAS_MAX_PD;
4079 pd_addr = pd_list_mem->addr;
4080 if (pd_list_mem->count < pd_count) {
4081 memset(sc->local_pd_list, 0,
4082 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4083 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4084 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4085 sc->local_pd_list[pd_addr->deviceId].driveType =
4086 pd_addr->scsiDevType;
4087 sc->local_pd_list[pd_addr->deviceId].driveState =
4088 MR_PD_STATE_SYSTEM;
4089 pd_addr++;
4090 }
4091 /*
4092 * Use mutext/spinlock if pd_list component size increase more than
4093 * 32 bit.
4094 */
4095 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4096 do_ocr = 0;
4097 }
4098dcmd_timeout:
4099 mrsas_free_tmp_dcmd(tcmd);
4100 free(tcmd, M_MRSAS);
4101
4102 if (do_ocr)
4103 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4104 else
4105 mrsas_release_mfi_cmd(cmd);
4106
4107 return (retcode);
4108}
4109
4110/*
4111 * mrsas_get_ld_list: Returns FW's LD list structure input:
4112 * Adapter soft state
4113 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure. This information is mainly used to find out the logical drives
 * supported by the FW.
4117 */
4118static int
4119mrsas_get_ld_list(struct mrsas_softc *sc)
4120{
4121 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4122 u_int8_t do_ocr = 1;
4123 struct mrsas_mfi_cmd *cmd;
4124 struct mrsas_dcmd_frame *dcmd;
4125 struct MR_LD_LIST *ld_list_mem;
4126 bus_addr_t ld_list_phys_addr = 0;
4127 struct mrsas_tmp_dcmd *tcmd;
4128
4129 cmd = mrsas_get_mfi_cmd(sc);
4130 if (!cmd) {
4131 device_printf(sc->mrsas_dev,
4132 "Cannot alloc for get LD list cmd\n");
4133 return 1;
4134 }
4135 dcmd = &cmd->frame->dcmd;
4136
4137 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4138 ld_list_size = sizeof(struct MR_LD_LIST);
4139 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4140 device_printf(sc->mrsas_dev,
4141 "Cannot alloc dmamap for get LD list cmd\n");
4142 mrsas_release_mfi_cmd(cmd);
4143 mrsas_free_tmp_dcmd(tcmd);
4144 free(tcmd, M_MRSAS);
4145 return (ENOMEM);
4146 } else {
4147 ld_list_mem = tcmd->tmp_dcmd_mem;
4148 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4149 }
4150 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4151
4152 if (sc->max256vdSupport)
4153 dcmd->mbox.b[0] = 1;
4154
4155 dcmd->cmd = MFI_CMD_DCMD;
4156 dcmd->cmd_status = 0xFF;
4157 dcmd->sge_count = 1;
4158 dcmd->flags = MFI_FRAME_DIR_READ;
4159 dcmd->timeout = 0;
4160 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4161 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4162 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4163 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4164 dcmd->pad_0 = 0;
4165
4166 retcode = mrsas_issue_polled(sc, cmd);
4167 if (retcode == ETIMEDOUT)
4168 goto dcmd_timeout;
4169
4170#if VD_EXT_DEBUG
4171 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4172#endif
4173
4174 /* Get the instance LD list */
4175 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4176 sc->CurLdCount = ld_list_mem->ldCount;
4177 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4178 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4179 if (ld_list_mem->ldList[ld_index].state != 0) {
4180 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4181 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4182 }
4183 }
4184 do_ocr = 0;
4185 }
4186dcmd_timeout:
4187 mrsas_free_tmp_dcmd(tcmd);
4188 free(tcmd, M_MRSAS);
4189
4190 if (do_ocr)
4191 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4192 else
4193 mrsas_release_mfi_cmd(cmd);
4194
4195 return (retcode);
4196}
4197
4198/*
4199 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
4200 * Adapter soft state Temp command Size of alloction
4201 *
4202 * Allocates DMAable memory for a temporary internal command. The allocated
4203 * memory is initialized to all zeros upon successful loading of the dma
4204 * mapped memory.
4205 */
int
mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
    struct mrsas_tmp_dcmd *tcmd, int size)
{
	/*
	 * Create a tag for a single contiguous, 32-bit addressable DMA
	 * segment of 'size' bytes for the temporary DCMD data buffer.
	 *
	 * NOTE(review): the error paths below do not undo the earlier
	 * steps; callers appear to rely on mrsas_free_tmp_dcmd() for
	 * cleanup, which is only safe if *tcmd was zero-initialized
	 * before this call — confirm at each call site.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,			/* lockfunc, lockarg */
	    &tcmd->tmp_dcmd_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
		return (ENOMEM);
	}
	/* mrsas_addr_cb stores the bus address into tmp_dcmd_phys_addr. */
	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
		return (ENOMEM);
	}
	/* Hand the caller a zeroed buffer. */
	memset(tcmd->tmp_dcmd_mem, 0, size);
	return (0);
}
4238
4239/*
4240 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4241 * temporary dcmd pointer
4242 *
4243 * Deallocates memory of the temporary command for use in the construction of
4244 * the internal DCMD.
4245 */
4246void
4247mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4248{
4249 if (tmp->tmp_dcmd_phys_addr)
4250 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4251 if (tmp->tmp_dcmd_mem != NULL)
4252 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4253 if (tmp->tmp_dcmd_tag != NULL)
4254 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4255}
4256
4257/*
4258 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4259 * Adapter soft state Previously issued cmd to be aborted
4260 *
4261 * This function is used to abort previously issued commands, such as AEN and
 * RAID map sync map commands. The abort request is sent as an internal MFI
 * frame and subsequently the driver will wait for a return status. The
4264 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4265 */
4266static int
4267mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4268 struct mrsas_mfi_cmd *cmd_to_abort)
4269{
4270 struct mrsas_mfi_cmd *cmd;
4271 struct mrsas_abort_frame *abort_fr;
4272 u_int8_t retcode = 0;
4273 unsigned long total_time = 0;
4274 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4275
4276 cmd = mrsas_get_mfi_cmd(sc);
4277 if (!cmd) {
4278 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4279 return (1);
4280 }
4281 abort_fr = &cmd->frame->abort;
4282
4283 /* Prepare and issue the abort frame */
4284 abort_fr->cmd = MFI_CMD_ABORT;
4285 abort_fr->cmd_status = 0xFF;
4286 abort_fr->flags = 0;
4287 abort_fr->abort_context = cmd_to_abort->index;
4288 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4289 abort_fr->abort_mfi_phys_addr_hi = 0;
4290
4291 cmd->sync_cmd = 1;
4292 cmd->cmd_status = 0xFF;
4293
4294 if (mrsas_issue_dcmd(sc, cmd)) {
4295 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4296 return (1);
4297 }
4298 /* Wait for this cmd to complete */
4299 sc->chan = (void *)&cmd;
4300 while (1) {
4301 if (cmd->cmd_status == 0xFF) {
4302 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4303 } else
4304 break;
4305 total_time++;
4306 if (total_time >= max_wait) {
4307 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4308 retcode = 1;
4309 break;
4310 }
4311 }
4312
4313 cmd->sync_cmd = 0;
4314 mrsas_release_mfi_cmd(cmd);
4315 return (retcode);
4316}
4317
4318/*
4319 * mrsas_complete_abort: Completes aborting a command input:
4320 * Adapter soft state Cmd that was issued to abort another cmd
4321 *
4322 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4323 * change after sending the command. This function is called from
4324 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4325 */
4326void
4327mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4328{
4329 if (cmd->sync_cmd) {
4330 cmd->sync_cmd = 0;
4331 cmd->cmd_status = 0;
4332 sc->chan = (void *)&cmd;
4333 wakeup_one((void *)&sc->chan);
4334 }
4335 return;
4336}
4337
4338/*
4339 * mrsas_aen_handler: AEN processing callback function from thread context
4340 * input: Adapter soft state
4341 *
4342 * Asynchronous event handler
4343 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	if (sc->evt_detail_mem) {
		/*
		 * Dispatch on the event code.  On any helper failure, skip
		 * re-registering for AENs (goto skip_register_aen).
		 */
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* PD set changed: refresh PD list, rescan sim_1. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			doscan = 0;
			break;
		case MR_EVT_PD_REMOVED:
			/* Same handling as PD_INSERTED. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			doscan = 0;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD went away: rescan sim_0 only, no list refresh. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			doscan = 0;
			break;
		case MR_EVT_LD_CREATED:
			/* New LD: refresh LD list, then rescan sim_0. */
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			doscan = 0;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: full rescan below. */
			doscan = 1;
			break;
		default:
			doscan = 0;
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		/* Full rescan: refresh both lists and rescan both sims. */
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; don't register another. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
4438
4439
4440/*
4441 * mrsas_complete_aen: Completes AEN command
4442 * input: Adapter soft state
4443 * Cmd that was issued to abort another cmd
4444 *
4445 * This function will be called from ISR and will continue event processing from
4446 * thread context by enqueuing task in ev_tq (callback function
4447 * "mrsas_aen_handler").
4448 */
4449void
4450mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4451{
4452 /*
4453 * Don't signal app if it is just an aborted previously registered
4454 * aen
4455 */
4456 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4457 sc->mrsas_aen_triggered = 1;
4458 mtx_lock(&sc->aen_lock);
4459 if (sc->mrsas_poll_waiting) {
4460 sc->mrsas_poll_waiting = 0;
4461 selwakeup(&sc->mrsas_select);
4462 }
4463 mtx_unlock(&sc->aen_lock);
4464 } else
4465 cmd->abort_aen = 0;
4466
4467 sc->aen_cmd = NULL;
4468 mrsas_release_mfi_cmd(cmd);
4469
4470 if (!sc->remove_in_progress)
4471 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4472
4473 return;
4474}
4475
/* Newbus device interface methods for the mrsas driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}			/* table terminator */
};

/* Driver description: name, method table, and per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register on the PCI bus; CAM provides the SCSI transport layer. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);