Deleted Added
sdiff udiff text old ( 299669 ) new ( 299670 )
full compact
1/*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
34 *
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37 *
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/sys/dev/mrsas/mrsas.c 299669 2016-05-13 12:15:20Z kadesai $");
42
43#include <dev/mrsas/mrsas.h>
44#include <dev/mrsas/mrsas_ioctl.h>
45
46#include <cam/cam.h>
47#include <cam/cam_ccb.h>
48
49#include <sys/sysctl.h>
50#include <sys/types.h>
51#include <sys/sysent.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/smp.h>
55
56
57/*
58 * Function prototypes
59 */
60static d_open_t mrsas_open;
61static d_close_t mrsas_close;
62static d_read_t mrsas_read;
63static d_write_t mrsas_write;
64static d_ioctl_t mrsas_ioctl;
65static d_poll_t mrsas_poll;
66
67static void mrsas_ich_startup(void *arg);
68static struct mrsas_mgmt_info mrsas_mgmt_info;
69static struct mrsas_ident *mrsas_find_ident(device_t);
70static int mrsas_setup_msix(struct mrsas_softc *sc);
71static int mrsas_allocate_msix(struct mrsas_softc *sc);
72static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73static void mrsas_flush_cache(struct mrsas_softc *sc);
74static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75static void mrsas_ocr_thread(void *arg);
76static int mrsas_get_map_info(struct mrsas_softc *sc);
77static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78static int mrsas_sync_map_info(struct mrsas_softc *sc);
79static int mrsas_get_pd_list(struct mrsas_softc *sc);
80static int mrsas_get_ld_list(struct mrsas_softc *sc);
81static int mrsas_setup_irq(struct mrsas_softc *sc);
82static int mrsas_alloc_mem(struct mrsas_softc *sc);
83static int mrsas_init_fw(struct mrsas_softc *sc);
84static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
88static int mrsas_clear_intr(struct mrsas_softc *sc);
89static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
90static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
91static int
92mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
93 struct mrsas_mfi_cmd *cmd_to_abort);
94static struct mrsas_softc *
95mrsas_get_softc_instance(struct cdev *dev,
96 u_long cmd, caddr_t arg);
97u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
98u_int8_t
99mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
100 struct mrsas_mfi_cmd *mfi_cmd);
101void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
102int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
103int mrsas_init_adapter(struct mrsas_softc *sc);
104int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
105int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
106int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
107int mrsas_ioc_init(struct mrsas_softc *sc);
108int mrsas_bus_scan(struct mrsas_softc *sc);
109int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
111int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
112int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
113int
114mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
115 struct mrsas_mfi_cmd *cmd);
116int
117mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
118 int size);
119void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
120void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
121void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123void mrsas_disable_intr(struct mrsas_softc *sc);
124void mrsas_enable_intr(struct mrsas_softc *sc);
125void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
126void mrsas_free_mem(struct mrsas_softc *sc);
127void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
128void mrsas_isr(void *arg);
129void mrsas_teardown_intr(struct mrsas_softc *sc);
130void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
131void mrsas_kill_hba(struct mrsas_softc *sc);
132void mrsas_aen_handler(struct mrsas_softc *sc);
133void
134mrsas_write_reg(struct mrsas_softc *sc, int offset,
135 u_int32_t value);
136void
137mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
138 u_int32_t req_desc_hi);
139void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
140void
141mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
142 struct mrsas_mfi_cmd *cmd, u_int8_t status);
143void
144mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
145 u_int8_t extStatus);
146struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
147
148MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
149 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
150
151extern int mrsas_cam_attach(struct mrsas_softc *sc);
152extern void mrsas_cam_detach(struct mrsas_softc *sc);
153extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
154extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
155extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
156extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
157extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
158extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
159extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
160extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
161extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
163extern void mrsas_xpt_release(struct mrsas_softc *sc);
164extern MRSAS_REQUEST_DESCRIPTOR_UNION *
165mrsas_get_request_desc(struct mrsas_softc *sc,
166 u_int16_t index);
167extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
168static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
169static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
170
171SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
172
173/*
174 * PCI device struct and table
175 *
176 */
177typedef struct mrsas_ident {
178 uint16_t vendor;
179 uint16_t device;
180 uint16_t subvendor;
181 uint16_t subdevice;
182 const char *desc;
183} MRSAS_CTLR_ID;
184
185MRSAS_CTLR_ID device_table[] = {
186 {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
187 {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
188 {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
189 {0, 0, 0, 0, NULL}
190};
191
192/*
193 * Character device entry points
194 *
195 */
196static struct cdevsw mrsas_cdevsw = {
197 .d_version = D_VERSION,
198 .d_open = mrsas_open,
199 .d_close = mrsas_close,
200 .d_read = mrsas_read,
201 .d_write = mrsas_write,
202 .d_ioctl = mrsas_ioctl,
203 .d_poll = mrsas_poll,
204 .d_name = "mrsas",
205};
206
207MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
208
209/*
210 * In the cdevsw routines, we find our softc by using the si_drv1 member of
211 * struct cdev. We set this variable to point to our softc in our attach
212 * routine when we create the /dev entry.
213 */
/*
 * mrsas_open:	character device open entry point.
 *
 * Open is a no-op for mrsas; access checks and softc lookup happen in the
 * ioctl path.  The former dead store of dev->si_drv1 into a local was
 * removed (it triggered a set-but-unused-variable warning).
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
222
/*
 * mrsas_close:	character device close entry point.
 *
 * Close is a no-op; no per-open state is kept.  The dead store of
 * dev->si_drv1 into an unused local was removed.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
231
/*
 * mrsas_read:	character device read entry point.
 *
 * Reads are not supported; the uio is left untouched and success is
 * returned.  The dead store of dev->si_drv1 into an unused local was
 * removed.
 */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
/*
 * mrsas_write:	character device write entry point.
 *
 * Writes are not supported; the uio is left untouched and success is
 * returned.  The dead store of dev->si_drv1 into an unused local was
 * removed.
 */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (0);
}
248
249/*
250 * Register Read/Write Functions
251 *
252 */
253void
254mrsas_write_reg(struct mrsas_softc *sc, int offset,
255 u_int32_t value)
256{
257 bus_space_tag_t bus_tag = sc->bus_tag;
258 bus_space_handle_t bus_handle = sc->bus_handle;
259
260 bus_space_write_4(bus_tag, bus_handle, offset, value);
261}
262
263u_int32_t
264mrsas_read_reg(struct mrsas_softc *sc, int offset)
265{
266 bus_space_tag_t bus_tag = sc->bus_tag;
267 bus_space_handle_t bus_handle = sc->bus_handle;
268
269 return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
270}
271
272
273/*
274 * Interrupt Disable/Enable/Clear Functions
275 *
276 */
277void
278mrsas_disable_intr(struct mrsas_softc *sc)
279{
280 u_int32_t mask = 0xFFFFFFFF;
281 u_int32_t status;
282
283 sc->mask_interrupts = 1;
284 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
285 /* Dummy read to force pci flush */
286 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
287}
288
289void
290mrsas_enable_intr(struct mrsas_softc *sc)
291{
292 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
293 u_int32_t status;
294
295 sc->mask_interrupts = 0;
296 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
297 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
298
299 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
300 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
301}
302
/*
 * mrsas_clear_intr:	acknowledge and classify a controller interrupt.
 * input:		Adapter soft state
 *
 * Returns 1 when the interrupt belongs to this adapter (firmware state
 * change or a reply), 0 when it does not (shared interrupt line).
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			/* Kick the OCR thread so it can attempt recovery. */
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write-to-clear, then read back to flush the posted write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}
335
336/*
337 * PCI Support Functions
338 *
339 */
340static struct mrsas_ident *
341mrsas_find_ident(device_t dev)
342{
343 struct mrsas_ident *pci_device;
344
345 for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
346 if ((pci_device->vendor == pci_get_vendor(dev)) &&
347 (pci_device->device == pci_get_device(dev)) &&
348 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
349 (pci_device->subvendor == 0xffff)) &&
350 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
351 (pci_device->subdevice == 0xffff)))
352 return (pci_device);
353 }
354 return (NULL);
355}
356
357static int
358mrsas_probe(device_t dev)
359{
360 static u_int8_t first_ctrl = 1;
361 struct mrsas_ident *id;
362
363 if ((id = mrsas_find_ident(dev)) != NULL) {
364 if (first_ctrl) {
365 printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
366 MRSAS_VERSION);
367 first_ctrl = 0;
368 }
369 device_set_desc(dev, id->desc);
370 /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
371 return (-30);
372 }
373 return (ENXIO);
374}
375
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:		Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the sysctl context/tree newbus created for this device. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Otherwise fall back to a private tree under hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
449
/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:		Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balance pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables (override the global setting) */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
483
484/*
485 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
486 * Used to get sequence number at driver load time.
487 * input: Adapter soft state
488 *
489 * Allocates DMAable memory for the event log info internal command.
490 */
491int
492mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
493{
494 int el_info_size;
495
496 /* Allocate get event log info command */
497 el_info_size = sizeof(struct mrsas_evt_log_info);
498 if (bus_dma_tag_create(sc->mrsas_parent_tag,
499 1, 0,
500 BUS_SPACE_MAXADDR_32BIT,
501 BUS_SPACE_MAXADDR,
502 NULL, NULL,
503 el_info_size,
504 1,
505 el_info_size,
506 BUS_DMA_ALLOCNOW,
507 NULL, NULL,
508 &sc->el_info_tag)) {
509 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
510 return (ENOMEM);
511 }
512 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
513 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
514 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
515 return (ENOMEM);
516 }
517 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
518 sc->el_info_mem, el_info_size, mrsas_addr_cb,
519 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
520 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
521 return (ENOMEM);
522 }
523 memset(sc->el_info_mem, 0, el_info_size);
524 return (0);
525}
526
527/*
528 * mrsas_free_evt_info_cmd: Free memory for Event log info command
529 * input: Adapter soft state
530 *
531 * Deallocates memory for the event log info internal command.
532 */
533void
534mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
535{
536 if (sc->el_info_phys_addr)
537 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
538 if (sc->el_info_mem != NULL)
539 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
540 if (sc->el_info_tag != NULL)
541 bus_dma_tag_destroy(sc->el_info_tag);
542}
543
/*
 * mrsas_get_seq_num:	Get latest event sequence number
 * @sc:			Adapter soft state
 * @eli:		Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware will fill with the log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame with one read SGE. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout, request an online controller reset instead of
	 * releasing the command (presumably the frame may still be owned
	 * by the firmware at that point — NOTE(review): confirm).
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
607
608
/*
 * mrsas_register_aen:	Register for asynchronous event notification
 * @sc:			Adapter soft state
 * @seq_num:		Starting sequence number
 * @class_locale_word:	Class/locale of the event, packed as one word
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.  Returns 0 on success (or when the existing
 * registration already covers the request), -ENOMEM when no MFI command
 * is free, 1 when the DCMD cannot be issued, or the abort error code.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] of the pending AEN holds its class/locale word. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			/* Abort the pending AEN before re-registering. */
			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * NOTE(review): aen_cmd was NULL (or aborted) above; this re-check
	 * presumably guards against a concurrent registration — confirm
	 * the locking model before relying on it.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
727
728/*
729 * mrsas_start_aen: Subscribes to AEN during driver load time
730 * @instance: Adapter soft state
731 */
732static int
733mrsas_start_aen(struct mrsas_softc *sc)
734{
735 struct mrsas_evt_log_info eli;
736 union mrsas_evt_class_locale class_locale;
737
738
739 /* Get the latest sequence number from FW */
740
741 memset(&eli, 0, sizeof(eli));
742
743 if (mrsas_get_seq_num(sc, &eli))
744 return -1;
745
746 /* Register AEN with FW for latest sequence number plus 1 */
747 class_locale.members.reserved = 0;
748 class_locale.members.locale = MR_EVT_LOCALE_ALL;
749 class_locale.members.class = MR_EVT_CLASS_DEBUG;
750
751 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
752 class_locale.word);
753
754}
755
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 *
 * Allocates one IRQ resource per MSI-x vector and hooks mrsas_isr to
 * each, passing a per-vector irq_context.  On any failure, tears down
 * whatever was set up so far and returns FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x resource IDs are 1-based. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release any vectors that were set up before the failure. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
792
793/*
794 * mrsas_allocate_msix: Setup MSI-x vectors
795 * @sc: adapter soft state
796 */
797static int
798mrsas_allocate_msix(struct mrsas_softc *sc)
799{
800 if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
801 device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
802 " of vectors\n", sc->msix_vectors);
803 } else {
804 device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
805 goto irq_alloc_failed;
806 }
807 return SUCCESS;
808
809irq_alloc_failed:
810 mrsas_teardown_intr(sc);
811 return (FAIL);
812}
813
/*
 * mrsas_attach:	PCI entry point
 * input:		pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.  Returns SUCCESS, or ENXIO after unwinding on failure.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	/* Map the register window (BAR1). */
	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Initialize linked lists for the MPT/MFI command pools */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Start the OCR (online controller reset) worker thread. */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: each label releases what was set up before it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
938
/*
 * Interrupt config hook: deferred startup work run once interrupts can be
 * delivered.  Creates the management cdev, registers the controller in
 * mrsas_mgmt_info, enables controller interrupts, starts AEN delivery,
 * and finally disestablishes the hook.
 */
static void
mrsas_ich_startup(void *arg)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema,
	    MRSAS_MAX_MFI_CMDS - 5,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Compatibility ioctl node, created only on the first controller. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* One-shot hook: disestablish it so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
995
/*
 * mrsas_detach: De-allocates and teardown resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.  The teardown order matters:
 * user-facing entry points (cdev) are removed first, outstanding OCR work
 * is drained, then the controller is flushed/shut down before interrupts,
 * CAM, memory and locks are released.
 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Makes mrsas_ioctl() and worker paths refuse new work. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (1s polls) for any in-flight OCR to finish before shutdown. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1082
1083/*
1084 * mrsas_free_mem: Frees allocated memory
1085 * input: Adapter instance soft state
1086 *
1087 * This function is called from mrsas_detach() to free previously allocated
1088 * memory.
1089 */
1090void
1091mrsas_free_mem(struct mrsas_softc *sc)
1092{
1093 int i;
1094 u_int32_t max_cmd;
1095 struct mrsas_mfi_cmd *mfi_cmd;
1096 struct mrsas_mpt_cmd *mpt_cmd;
1097
1098 /*
1099 * Free RAID map memory
1100 */
1101 for (i = 0; i < 2; i++) {
1102 if (sc->raidmap_phys_addr[i])
1103 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
1104 if (sc->raidmap_mem[i] != NULL)
1105 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
1106 if (sc->raidmap_tag[i] != NULL)
1107 bus_dma_tag_destroy(sc->raidmap_tag[i]);
1108
1109 if (sc->ld_drv_map[i] != NULL)
1110 free(sc->ld_drv_map[i], M_MRSAS);
1111 }
1112 for (i = 0; i < 2; i++) {
1113 if (sc->jbodmap_phys_addr[i])
1114 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
1115 if (sc->jbodmap_mem[i] != NULL)
1116 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
1117 if (sc->jbodmap_tag[i] != NULL)
1118 bus_dma_tag_destroy(sc->jbodmap_tag[i]);
1119 }
1120 /*
1121 * Free version buffer memory
1122 */
1123 if (sc->verbuf_phys_addr)
1124 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
1125 if (sc->verbuf_mem != NULL)
1126 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
1127 if (sc->verbuf_tag != NULL)
1128 bus_dma_tag_destroy(sc->verbuf_tag);
1129
1130
1131 /*
1132 * Free sense buffer memory
1133 */
1134 if (sc->sense_phys_addr)
1135 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
1136 if (sc->sense_mem != NULL)
1137 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
1138 if (sc->sense_tag != NULL)
1139 bus_dma_tag_destroy(sc->sense_tag);
1140
1141 /*
1142 * Free chain frame memory
1143 */
1144 if (sc->chain_frame_phys_addr)
1145 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
1146 if (sc->chain_frame_mem != NULL)
1147 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
1148 if (sc->chain_frame_tag != NULL)
1149 bus_dma_tag_destroy(sc->chain_frame_tag);
1150
1151 /*
1152 * Free IO Request memory
1153 */
1154 if (sc->io_request_phys_addr)
1155 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
1156 if (sc->io_request_mem != NULL)
1157 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
1158 if (sc->io_request_tag != NULL)
1159 bus_dma_tag_destroy(sc->io_request_tag);
1160
1161 /*
1162 * Free Reply Descriptor memory
1163 */
1164 if (sc->reply_desc_phys_addr)
1165 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
1166 if (sc->reply_desc_mem != NULL)
1167 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
1168 if (sc->reply_desc_tag != NULL)
1169 bus_dma_tag_destroy(sc->reply_desc_tag);
1170
1171 /*
1172 * Free event detail memory
1173 */
1174 if (sc->evt_detail_phys_addr)
1175 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1176 if (sc->evt_detail_mem != NULL)
1177 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1178 if (sc->evt_detail_tag != NULL)
1179 bus_dma_tag_destroy(sc->evt_detail_tag);
1180
1181 /*
1182 * Free MFI frames
1183 */
1184 if (sc->mfi_cmd_list) {
1185 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1186 mfi_cmd = sc->mfi_cmd_list[i];
1187 mrsas_free_frame(sc, mfi_cmd);
1188 }
1189 }
1190 if (sc->mficmd_frame_tag != NULL)
1191 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1192
1193 /*
1194 * Free MPT internal command list
1195 */
1196 max_cmd = sc->max_fw_cmds;
1197 if (sc->mpt_cmd_list) {
1198 for (i = 0; i < max_cmd; i++) {
1199 mpt_cmd = sc->mpt_cmd_list[i];
1200 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1201 free(sc->mpt_cmd_list[i], M_MRSAS);
1202 }
1203 free(sc->mpt_cmd_list, M_MRSAS);
1204 sc->mpt_cmd_list = NULL;
1205 }
1206 /*
1207 * Free MFI internal command list
1208 */
1209
1210 if (sc->mfi_cmd_list) {
1211 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1212 free(sc->mfi_cmd_list[i], M_MRSAS);
1213 }
1214 free(sc->mfi_cmd_list, M_MRSAS);
1215 sc->mfi_cmd_list = NULL;
1216 }
1217 /*
1218 * Free request descriptor memory
1219 */
1220 free(sc->req_desc, M_MRSAS);
1221 sc->req_desc = NULL;
1222
1223 /*
1224 * Destroy parent tag
1225 */
1226 if (sc->mrsas_parent_tag != NULL)
1227 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1228
1229 /*
1230 * Free ctrl_info memory
1231 */
1232 if (sc->ctrl_info != NULL)
1233 free(sc->ctrl_info, M_MRSAS);
1234}
1235
1236/*
1237 * mrsas_teardown_intr: Teardown interrupt
1238 * input: Adapter instance soft state
1239 *
1240 * This function is called from mrsas_detach() to teardown and release bus
1241 * interrupt resourse.
1242 */
1243void
1244mrsas_teardown_intr(struct mrsas_softc *sc)
1245{
1246 int i;
1247
1248 if (!sc->msix_enable) {
1249 if (sc->intr_handle[0])
1250 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1251 if (sc->mrsas_irq[0] != NULL)
1252 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1253 sc->irq_id[0], sc->mrsas_irq[0]);
1254 sc->intr_handle[0] = NULL;
1255 } else {
1256 for (i = 0; i < sc->msix_vectors; i++) {
1257 if (sc->intr_handle[i])
1258 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1259 sc->intr_handle[i]);
1260
1261 if (sc->mrsas_irq[i] != NULL)
1262 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1263 sc->irq_id[i], sc->mrsas_irq[i]);
1264
1265 sc->intr_handle[i] = NULL;
1266 }
1267 pci_release_msi(sc->mrsas_dev);
1268 }
1269
1270}
1271
1272/*
1273 * mrsas_suspend: Suspend entry point
1274 * input: Device struct pointer
1275 *
1276 * This function is the entry point for system suspend from the OS.
1277 */
1278static int
1279mrsas_suspend(device_t dev)
1280{
1281 struct mrsas_softc *sc;
1282
1283 sc = device_get_softc(dev);
1284 return (0);
1285}
1286
1287/*
1288 * mrsas_resume: Resume entry point
1289 * input: Device struct pointer
1290 *
1291 * This function is the entry point for system resume from the OS.
1292 */
1293static int
1294mrsas_resume(device_t dev)
1295{
1296 struct mrsas_softc *sc;
1297
1298 sc = device_get_softc(dev);
1299 return (0);
1300}
1301
1302/**
1303 * mrsas_get_softc_instance: Find softc instance based on cmd type
1304 *
1305 * This function will return softc instance based on cmd type.
1306 * In some case, application fire ioctl on required management instance and
1307 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1308 * case, else get the softc instance from host_no provided by application in
1309 * user data.
1310 */
1311
1312static struct mrsas_softc *
1313mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1314{
1315 struct mrsas_softc *sc = NULL;
1316 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1317
1318 if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1319 sc = dev->si_drv1;
1320 } else {
1321 /*
1322 * get the Host number & the softc from data sent by the
1323 * Application
1324 */
1325 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1326 if (sc == NULL)
1327 printf("There is no Controller number %d\n",
1328 user_ioc->host_no);
1329 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1330 mrsas_dprint(sc, MRSAS_FAULT,
1331 "Invalid Controller number %d\n", user_ioc->host_no);
1332 }
1333
1334 return sc;
1335}
1336
/*
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS. It calls the
 * appropriate function for processing depending on the command received.
 * Requests are refused once detach has started, and are held off (1s polls)
 * while an OCR/reset is in progress.  Pass-through commands are throttled
 * by the ioctl counting semaphore.
 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;

	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/* Fast path: no reset in progress -> proceed immediately. */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	/* Slow path: drop the spin lock and poll until the OCR completes. */
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report the controller's PCI location to the application. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1419
1420/*
1421 * mrsas_poll: poll entry point for mrsas driver fd
1422 *
1423 * This function is the entry point for poll from the OS. It waits for some AEN
1424 * events to be triggered from the controller and notifies back.
1425 */
1426static int
1427mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1428{
1429 struct mrsas_softc *sc;
1430 int revents = 0;
1431
1432 sc = dev->si_drv1;
1433
1434 if (poll_events & (POLLIN | POLLRDNORM)) {
1435 if (sc->mrsas_aen_triggered) {
1436 revents |= poll_events & (POLLIN | POLLRDNORM);
1437 }
1438 }
1439 if (revents == 0) {
1440 if (poll_events & (POLLIN | POLLRDNORM)) {
1441 mtx_lock(&sc->aen_lock);
1442 sc->mrsas_poll_waiting = 1;
1443 selrecord(td, &sc->mrsas_select);
1444 mtx_unlock(&sc->aen_lock);
1445 }
1446 }
1447 return revents;
1448}
1449
1450/*
1451 * mrsas_setup_irq: Set up interrupt
1452 * input: Adapter instance soft state
1453 *
1454 * This function sets up interrupts as a bus resource, with flags indicating
1455 * resource permitting contemporaneous sharing and for resource to activate
1456 * atomically.
1457 */
1458static int
1459mrsas_setup_irq(struct mrsas_softc *sc)
1460{
1461 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1462 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1463
1464 else {
1465 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1466 sc->irq_context[0].sc = sc;
1467 sc->irq_context[0].MSIxIndex = 0;
1468 sc->irq_id[0] = 0;
1469 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1470 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1471 if (sc->mrsas_irq[0] == NULL) {
1472 device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1473 "interrupt\n");
1474 return (FAIL);
1475 }
1476 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1477 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1478 &sc->irq_context[0], &sc->intr_handle[0])) {
1479 device_printf(sc->mrsas_dev, "Cannot set up legacy"
1480 "interrupt\n");
1481 return (FAIL);
1482 }
1483 }
1484 return (0);
1485}
1486
1487/*
1488 * mrsas_isr: ISR entry point
1489 * input: argument pointer
1490 *
1491 * This function is the interrupt service routine entry point. There are two
1492 * types of interrupts, state change interrupt and response interrupt. If an
1493 * interrupt is not ours, we just return.
1494 */
1495void
1496mrsas_isr(void *arg)
1497{
1498 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1499 struct mrsas_softc *sc = irq_context->sc;
1500 int status = 0;
1501
1502 if (sc->mask_interrupts)
1503 return;
1504
1505 if (!sc->msix_vectors) {
1506 status = mrsas_clear_intr(sc);
1507 if (!status)
1508 return;
1509 }
1510 /* If we are resetting, bail */
1511 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1512 printf(" Entered into ISR when OCR is going active. \n");
1513 mrsas_clear_intr(sc);
1514 return;
1515 }
1516 /* Process for reply request and clear response interrupt */
1517 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1518 mrsas_clear_intr(sc);
1519
1520 return;
1521}
1522
/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state, MSI-X vector index
 *
 * This function is called from mrsas_isr() to process reply request and clear
 * response interrupt. Processing of the reply request entails walking
 * through the reply descriptor array for the command request pended from
 * Firmware. We look at the Function field to determine the command type and
 * perform the appropriate action. Before we return, we clear the response
 * interrupt.
 *
 * Returns 0 when at least one command was completed, DONE otherwise.
 */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Resume the ring walk where we left off for this vector: each
	 * vector owns a reply_alloc_sz-sized slice of the reply area.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  An
	 * all-ones descriptor marks an unused (not yet posted) slot.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		/* SMIDs are 1-based; index 0 holds SMID 1. */
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load-balanced I/O: drop the per-arm counter. */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor (wrap to the slice start). */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1666
/*
 * mrsas_map_mpt_cmd_status: Map firmware status to CAM status
 * input: completed MPT command, firmware status, extended status
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath
 * IO.  It checks the firmware command status and maps the appropriate CAM
 * status (and, for SCSI errors, the autosense data) into the CCB.
 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/* A nonzero LUN on a missing device is an invalid LUN. */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/* Stale map sequence number: ask CAM to retry the request. */
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1713
/*
 * mrsas_alloc_mem: Allocate DMAable memory
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory. DMA
 * tag describes constraints of DMA mapping. Memory allocated is mapped into
 * Kernel virtual address. Callback argument is physical memory address.
 *
 * On any intermediate failure it returns ENOMEM without unwinding; the
 * caller's error path is expected to invoke mrsas_free_mem(), which checks
 * each resource before releasing it.
 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXPHYS,			/* maxsize */
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, below 4GB)
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (4-byte aligned, below 4GB)
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array (one slice per MSI-X vector)
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array. Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Maps are serialized by sc->io_lock via
	 * busdma_lock_mutex.  NOTE(review): this tag is destroyed by
	 * mrsas_free_mem(), not here.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MAXPHYS,
	    sc->max_num_sge,		/* nsegments */
	    MAXPHYS,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
1948
1949/*
1950 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1951 * input: callback argument, machine dependent type
1952 * that describes DMA segments, number of segments, error code
1953 *
1954 * This function is for the driver to receive mapping information resultant of
1955 * the bus_dmamap_load(). The information is actually not being used, but the
1956 * address is saved anyway.
1957 */
1958void
1959mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1960{
1961 bus_addr_t *addr;
1962
1963 addr = arg;
1964 *addr = segs[0].ds_addr;
1965}
1966
1967/*
1968 * mrsas_setup_raidmap: Set up RAID map.
1969 * input: Adapter instance soft state
1970 *
1971 * Allocate DMA memory for the RAID maps and perform setup.
1972 */
1973static int
1974mrsas_setup_raidmap(struct mrsas_softc *sc)
1975{
1976 int i;
1977
1978 for (i = 0; i < 2; i++) {
1979 sc->ld_drv_map[i] =
1980 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1981 /* Do Error handling */
1982 if (!sc->ld_drv_map[i]) {
1983 device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1984
1985 if (i == 1)
1986 free(sc->ld_drv_map[0], M_MRSAS);
1987 /* ABORT driver initialization */
1988 goto ABORT;
1989 }
1990 }
1991
1992 for (int i = 0; i < 2; i++) {
1993 if (bus_dma_tag_create(sc->mrsas_parent_tag,
1994 4, 0,
1995 BUS_SPACE_MAXADDR_32BIT,
1996 BUS_SPACE_MAXADDR,
1997 NULL, NULL,
1998 sc->max_map_sz,
1999 1,
2000 sc->max_map_sz,
2001 BUS_DMA_ALLOCNOW,
2002 NULL, NULL,
2003 &sc->raidmap_tag[i])) {
2004 device_printf(sc->mrsas_dev,
2005 "Cannot allocate raid map tag.\n");
2006 return (ENOMEM);
2007 }
2008 if (bus_dmamem_alloc(sc->raidmap_tag[i],
2009 (void **)&sc->raidmap_mem[i],
2010 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2011 device_printf(sc->mrsas_dev,
2012 "Cannot allocate raidmap memory.\n");
2013 return (ENOMEM);
2014 }
2015 bzero(sc->raidmap_mem[i], sc->max_map_sz);
2016
2017 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2018 sc->raidmap_mem[i], sc->max_map_sz,
2019 mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2020 BUS_DMA_NOWAIT)) {
2021 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2022 return (ENOMEM);
2023 }
2024 if (!sc->raidmap_mem[i]) {
2025 device_printf(sc->mrsas_dev,
2026 "Cannot allocate memory for raid map.\n");
2027 return (ENOMEM);
2028 }
2029 }
2030
2031 if (!mrsas_get_map_info(sc))
2032 mrsas_sync_map_info(sc);
2033
2034 return (0);
2035
2036ABORT:
2037 return (1);
2038}
2039
2040/**
2041 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
2042 * @sc: Adapter soft state
2043 *
 * No return value; on failure, the driver continues with the JBOD
 * fast-path disabled (sc->use_seqnum_jbod_fp = 0).
2045 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/*
	 * Size of the PD sequence-number sync buffer: header plus one
	 * MR_PD_CFG_SEQ per physical device (one is already embedded in
	 * the struct, hence MAX_PHYSICAL_DEVICES - 1).
	 */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* FW must advertise JBOD fast-path via sequence numbers. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers persist across OCR; allocate only on the first call. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	/* Double-buffered DMA memory for the JBOD sequence-number map. */
	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/*
		 * NOTE(review): this check can never fire — bus_dmamem_alloc()
		 * above already validated jbodmap_mem[i] before it was used.
		 */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Sync the sequence numbers both directions before enabling FP. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	/* NOTE(review): printed even when the sync above failed. */
	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
2111
2112/*
2113 * mrsas_init_fw: Initialize Firmware
2114 * input: Adapter soft state
2115 *
2116 * Calls transition_to_ready() to make sure Firmware is in operational state and
2117 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It
2118 * issues internal commands to get the controller info after the IOC_INIT
2119 * command response is received by Firmware. Note: code relating to
2120 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2121 * is left here as placeholder.
2122 */
2123static int
2124mrsas_init_fw(struct mrsas_softc *sc)
2125{
2126
2127 int ret, loop, ocr = 0;
2128 u_int32_t max_sectors_1;
2129 u_int32_t max_sectors_2;
2130 u_int32_t tmp_sectors;
2131 u_int32_t scratch_pad_2;
2132 int msix_enable = 0;
2133 int fw_msix_count = 0;
2134
2135 /* Make sure Firmware is ready */
2136 ret = mrsas_transition_to_ready(sc, ocr);
2137 if (ret != SUCCESS) {
2138 return (ret);
2139 }
2140 /* MSI-x index 0- reply post host index register */
2141 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2142 /* Check if MSI-X is supported while in ready state */
2143 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2144
2145 if (msix_enable) {
2146 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2147 outbound_scratch_pad_2));
2148
2149 /* Check max MSI-X vectors */
2150 if (sc->device_id == MRSAS_TBOLT) {
2151 sc->msix_vectors = (scratch_pad_2
2152 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2153 fw_msix_count = sc->msix_vectors;
2154 } else {
2155 /* Invader/Fury supports 96 MSI-X vectors */
2156 sc->msix_vectors = ((scratch_pad_2
2157 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2158 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2159 fw_msix_count = sc->msix_vectors;
2160
2161 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2162 loop++) {
2163 sc->msix_reg_offset[loop] =
2164 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2165 (loop * 0x10);
2166 }
2167 }
2168
2169 /* Don't bother allocating more MSI-X vectors than cpus */
2170 sc->msix_vectors = min(sc->msix_vectors,
2171 mp_ncpus);
2172
2173 /* Allocate MSI-x vectors */
2174 if (mrsas_allocate_msix(sc) == SUCCESS)
2175 sc->msix_enable = 1;
2176 else
2177 sc->msix_enable = 0;
2178
2179 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2180 "Online CPU %d Current MSIX <%d>\n",
2181 fw_msix_count, mp_ncpus, sc->msix_vectors);
2182 }
2183 if (mrsas_init_adapter(sc) != SUCCESS) {
2184 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2185 return (1);
2186 }
2187 /* Allocate internal commands for pass-thru */
2188 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2189 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2190 return (1);
2191 }
2192 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2193 if (!sc->ctrl_info) {
2194 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2195 return (1);
2196 }
2197 /*
2198 * Get the controller info from FW, so that the MAX VD support
2199 * availability can be decided.
2200 */
2201 if (mrsas_get_ctrl_info(sc)) {
2202 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2203 return (1);
2204 }
2205 sc->secure_jbod_support =
2206 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2207
2208 if (sc->secure_jbod_support)
2209 device_printf(sc->mrsas_dev, "FW supports SED \n");
2210
2211 if (sc->use_seqnum_jbod_fp)
2212 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2213
2214 if (mrsas_setup_raidmap(sc) != SUCCESS) {
2215 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2216 "There seems to be some problem in the controller\n"
2217 "Please contact to the SUPPORT TEAM if the problem persists\n");
2218 }
2219 megasas_setup_jbod_map(sc);
2220
2221 /* For pass-thru, get PD/LD list and controller info */
2222 memset(sc->pd_list, 0,
2223 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2224 if (mrsas_get_pd_list(sc) != SUCCESS) {
2225 device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2226 return (1);
2227 }
2228 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2229 if (mrsas_get_ld_list(sc) != SUCCESS) {
2230 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2231 return (1);
2232 }
2233 /*
2234 * Compute the max allowed sectors per IO: The controller info has
2235 * two limits on max sectors. Driver should use the minimum of these
2236 * two.
2237 *
2238 * 1 << stripe_sz_ops.min = max sectors per strip
2239 *
2240 * Note that older firmwares ( < FW ver 30) didn't report information to
2241 * calculate max_sectors_1. So the number ended up as zero always.
2242 */
2243 tmp_sectors = 0;
2244 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2245 sc->ctrl_info->max_strips_per_io;
2246 max_sectors_2 = sc->ctrl_info->max_request_size;
2247 tmp_sectors = min(max_sectors_1, max_sectors_2);
2248 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2249
2250 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2251 sc->max_sectors_per_req = tmp_sectors;
2252
2253 sc->disableOnlineCtrlReset =
2254 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2255 sc->UnevenSpanSupport =
2256 sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2257 if (sc->UnevenSpanSupport) {
2258 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2259 sc->UnevenSpanSupport);
2260
2261 if (MR_ValidateMapInfo(sc))
2262 sc->fast_path_io = 1;
2263 else
2264 sc->fast_path_io = 0;
2265 }
2266 return (0);
2267}
2268
2269/*
2270 * mrsas_init_adapter: Initializes the adapter/controller
2271 * input: Adapter soft state
2272 *
2273 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2274 * ROC/controller. The FW register is read to determined the number of
2275 * commands that is supported. All memory allocations for IO is based on
2276 * max_cmd. Appropriate calculations are performed in this function.
2277 */
2278int
2279mrsas_init_adapter(struct mrsas_softc *sc)
2280{
2281 uint32_t status;
2282 u_int32_t max_cmd, scratch_pad_2;
2283 int ret;
2284 int i = 0;
2285
2286 /* Read FW status register */
2287 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2288
2289 /* Get operational params from status register */
2290 sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2291
2292 /* Decrement the max supported by 1, to correlate with FW */
2293 sc->max_fw_cmds = sc->max_fw_cmds - 1;
2294 max_cmd = sc->max_fw_cmds;
2295
2296 /* Determine allocation size of command frames */
2297 sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2298 sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2299 sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2300 sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2301 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2302 outbound_scratch_pad_2));
2303 /*
2304 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2305 * Firmware support extended IO chain frame which is 4 time more
2306 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2307 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
2308 */
2309 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2310 sc->max_chain_frame_sz =
2311 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2312 * MEGASAS_1MB_IO;
2313 else
2314 sc->max_chain_frame_sz =
2315 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2316 * MEGASAS_256K_IO;
2317
2318 sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
2319 sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2320 offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2321
2322 sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2323 sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2324
2325 mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
2326 sc->max_num_sge, sc->max_chain_frame_sz);
2327
2328 /* Used for pass thru MFI frame (DCMD) */
2329 sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2330
2331 sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2332 sizeof(MPI2_SGE_IO_UNION)) / 16;
2333
2334 int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2335
2336 for (i = 0; i < count; i++)
2337 sc->last_reply_idx[i] = 0;
2338
2339 ret = mrsas_alloc_mem(sc);
2340 if (ret != SUCCESS)
2341 return (ret);
2342
2343 ret = mrsas_alloc_mpt_cmds(sc);
2344 if (ret != SUCCESS)
2345 return (ret);
2346
2347 ret = mrsas_ioc_init(sc);
2348 if (ret != SUCCESS)
2349 return (ret);
2350
2351 return (0);
2352}
2353
2354/*
2355 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
2356 * input: Adapter soft state
2357 *
2358 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2359 */
2360int
2361mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2362{
2363 int ioc_init_size;
2364
2365 /* Allocate IOC INIT command */
2366 ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2367 if (bus_dma_tag_create(sc->mrsas_parent_tag,
2368 1, 0,
2369 BUS_SPACE_MAXADDR_32BIT,
2370 BUS_SPACE_MAXADDR,
2371 NULL, NULL,
2372 ioc_init_size,
2373 1,
2374 ioc_init_size,
2375 BUS_DMA_ALLOCNOW,
2376 NULL, NULL,
2377 &sc->ioc_init_tag)) {
2378 device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2379 return (ENOMEM);
2380 }
2381 if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2382 BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2383 device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2384 return (ENOMEM);
2385 }
2386 bzero(sc->ioc_init_mem, ioc_init_size);
2387 if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2388 sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2389 &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2390 device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2391 return (ENOMEM);
2392 }
2393 return (0);
2394}
2395
2396/*
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
2398 * input: Adapter soft state
2399 *
2400 * Deallocates memory of the IOC Init cmd.
2401 */
2402void
2403mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2404{
2405 if (sc->ioc_init_phys_mem)
2406 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2407 if (sc->ioc_init_mem != NULL)
2408 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2409 if (sc->ioc_init_tag != NULL)
2410 bus_dma_tag_destroy(sc->ioc_init_tag);
2411}
2412
2413/*
2414 * mrsas_ioc_init: Sends IOC Init command to FW
2415 * input: Adapter soft state
2416 *
2417 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2418 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/* MPI2 IOC INIT request sits 1KB into the buffer, after the MFI frame. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	/* Frame size is expressed in 32-bit words. */
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* The MFI frame at the start of the buffer wraps the IOC INIT. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF marks "no response yet"; FW overwrites it on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW, if the buffer exists. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Capability bits negotiated with FW. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	/* Physical address of the IOC INIT payload (1KB into the buffer). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Fire the MFI frame as an MFA-type request descriptor. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		/* 0xFF still set means FW never answered; otherwise FW errored. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2506
2507/*
2508 * mrsas_alloc_mpt_cmds: Allocates the command packets
2509 * input: Adapter instance soft state
2510 *
2511 * This function allocates the internal commands for IOs. Each command that is
2512 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2513 * array is allocated with mrsas_mpt_cmd context. The free commands are
2514 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2515 * max_fw_cmds.
2516 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	/* Array of request descriptors, one per command slot. */
	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind the partially-filled array before failing. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve per-command slices out of the shared DMA regions allocated
	 * earlier. IO requests start one frame in; frame 0 is reserved.
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		/* SMIDs are 1-based; index 0 is reserved by the protocol. */
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout, 0);
		/* MRSAS_ULONG_MAX marks "not paired with an MFI command". */
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		/* Put the freshly built command on the free pool. */
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}
2598
2599/*
2600 * mrsas_fire_cmd: Sends command to FW
2601 * input: Adapter softstate
2602 * request descriptor address low
2603 * request descriptor address high
2604 *
2605 * This functions fires the command to Firmware by writing to the
2606 * inbound_low_queue_port and inbound_high_queue_port.
2607 */
2608void
2609mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2610 u_int32_t req_desc_hi)
2611{
2612 mtx_lock(&sc->pci_lock);
2613 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2614 req_desc_lo);
2615 mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2616 req_desc_hi);
2617 mtx_unlock(&sc->pci_lock);
2618}
2619
2620/*
2621 * mrsas_transition_to_ready: Move FW to Ready state input:
2622 * Adapter instance soft state
2623 *
2624 * During the initialization, FW passes can potentially be in any one of several
2625 * possible states. If the FW in operational, waiting-for-handshake states,
2626 * driver must take steps to bring it to ready state. Otherwise, it has to
2627 * wait for the ready state.
2628 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Snapshot the full register to detect any state change. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* Only tolerable during OCR; fatal at first attach. */
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the pending boot message via doorbell. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for doorbell bit 0 to clear (reset accepted). */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2734
2735/*
2736 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2737 * input: Adapter soft state
2738 *
2739 * This function removes an MFI command from the command list.
2740 */
2741struct mrsas_mfi_cmd *
2742mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2743{
2744 struct mrsas_mfi_cmd *cmd = NULL;
2745
2746 mtx_lock(&sc->mfi_cmd_pool_lock);
2747 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2748 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2749 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2750 }
2751 mtx_unlock(&sc->mfi_cmd_pool_lock);
2752
2753 return cmd;
2754}
2755
2756/*
2757 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
2758 * input: Adapter Context.
2759 *
2760 * This function will check FW status register and flag do_timeout_reset flag.
2761 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2762 * trigger reset.
2763 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit on driver detach or after an unrecoverable HW error. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		/* Poll FW state; reset on FAULT or on a flagged IO timeout. */
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "%s started due to %s!\n",
			    sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* Mark reset in progress under the ioctl spin lock. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM traffic for the duration of the reset. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc, sc->do_timedout_reset);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2811
2812/*
2813 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR.
2814 * input: Adapter Context.
2815 *
2816 * This function will clear reply descriptor so that post OCR driver and FW will
2817 * lost old history.
2818 */
2819void
2820mrsas_reset_reply_desc(struct mrsas_softc *sc)
2821{
2822 int i, count;
2823 pMpi2ReplyDescriptorsUnion_t reply_desc;
2824
2825 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2826 for (i = 0; i < count; i++)
2827 sc->last_reply_idx[i] = 0;
2828
2829 reply_desc = sc->reply_desc_mem;
2830 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2831 reply_desc->Words = MRSAS_ULONG_MAX;
2832 }
2833}
2834
2835/*
2836 * mrsas_reset_ctrl: Core function to OCR/Kill adapter.
2837 * input: Adapter Context.
2838 *
2839 * This function will run from thread context so that it can sleep. 1. Do not
2840 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2841 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2842 * command Controller is in working state, so skip OCR. Otherwise, do
2843 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2844 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
2845 * OCR, Re-fire Management command and move Controller to Operation state.
2846 */
2847int
2848mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
2849{
2850 int retval = SUCCESS, i, j, retry = 0;
2851 u_int32_t host_diag, abs_state, status_reg, reset_adapter;
2852 union ccb *ccb;
2853 struct mrsas_mfi_cmd *mfi_cmd;
2854 struct mrsas_mpt_cmd *mpt_cmd;
2855 union mrsas_evt_class_locale class_locale;
2856
2857 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2858 device_printf(sc->mrsas_dev,
2859 "mrsas: Hardware critical error, returning FAIL.\n");
2860 return FAIL;
2861 }
2862 mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2863 sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2864 mrsas_disable_intr(sc);
2865 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
2866 sc->mrsas_fw_fault_check_delay * hz);
2867
2868 /* First try waiting for commands to complete */
2869 if (mrsas_wait_for_outstanding(sc, reset_reason)) {
2870 mrsas_dprint(sc, MRSAS_OCR,
2871 "resetting adapter from %s.\n",
2872 __func__);
2873 /* Now return commands back to the CAM layer */
2874 mtx_unlock(&sc->sim_lock);
2875 for (i = 0; i < sc->max_fw_cmds; i++) {
2876 mpt_cmd = sc->mpt_cmd_list[i];
2877 if (mpt_cmd->ccb_ptr) {
2878 ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2879 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2880 mrsas_cmd_done(sc, mpt_cmd);
2881 mrsas_atomic_dec(&sc->fw_outstanding);
2882 }
2883 }
2884 mtx_lock(&sc->sim_lock);
2885
2886 status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2887 outbound_scratch_pad));
2888 abs_state = status_reg & MFI_STATE_MASK;
2889 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2890 if (sc->disableOnlineCtrlReset ||
2891 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2892 /* Reset not supported, kill adapter */
2893 mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
2894 mrsas_kill_hba(sc);
2895 retval = FAIL;
2896 goto out;
2897 }
2898 /* Now try to reset the chip */
2899 for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
2900 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2901 MPI2_WRSEQ_FLUSH_KEY_VALUE);
2902 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2903 MPI2_WRSEQ_1ST_KEY_VALUE);
2904 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2905 MPI2_WRSEQ_2ND_KEY_VALUE);
2906 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2907 MPI2_WRSEQ_3RD_KEY_VALUE);
2908 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2909 MPI2_WRSEQ_4TH_KEY_VALUE);
2910 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2911 MPI2_WRSEQ_5TH_KEY_VALUE);
2912 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2913 MPI2_WRSEQ_6TH_KEY_VALUE);
2914
2915 /* Check that the diag write enable (DRWE) bit is on */
2916 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2917 fusion_host_diag));
2918 retry = 0;
2919 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2920 DELAY(100 * 1000);
2921 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2922 fusion_host_diag));
2923 if (retry++ == 100) {
2924 mrsas_dprint(sc, MRSAS_OCR,
2925 "Host diag unlock failed!\n");
2926 break;
2927 }
2928 }
2929 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2930 continue;
2931
2932 /* Send chip reset command */
2933 mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2934 host_diag | HOST_DIAG_RESET_ADAPTER);
2935 DELAY(3000 * 1000);
2936
2937 /* Make sure reset adapter bit is cleared */
2938 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2939 fusion_host_diag));
2940 retry = 0;
2941 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2942 DELAY(100 * 1000);
2943 host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2944 fusion_host_diag));
2945 if (retry++ == 1000) {
2946 mrsas_dprint(sc, MRSAS_OCR,
2947 "Diag reset adapter never cleared!\n");
2948 break;
2949 }
2950 }
2951 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2952 continue;
2953
2954 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2955 outbound_scratch_pad)) & MFI_STATE_MASK;
2956 retry = 0;
2957
2958 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2959 DELAY(100 * 1000);
2960 abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2961 outbound_scratch_pad)) & MFI_STATE_MASK;
2962 }
2963 if (abs_state <= MFI_STATE_FW_INIT) {
2964 mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2965 " state = 0x%x\n", abs_state);
2966 continue;
2967 }
2968 /* Wait for FW to become ready */
2969 if (mrsas_transition_to_ready(sc, 1)) {
2970 mrsas_dprint(sc, MRSAS_OCR,
2971 "mrsas: Failed to transition controller to ready.\n");
2972 continue;
2973 }
2974 mrsas_reset_reply_desc(sc);
2975 if (mrsas_ioc_init(sc)) {
2976 mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2977 continue;
2978 }
2979 for (j = 0; j < sc->max_fw_cmds; j++) {
2980 mpt_cmd = sc->mpt_cmd_list[j];
2981 if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2982 mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2983 mrsas_release_mfi_cmd(mfi_cmd);
2984 mrsas_release_mpt_cmd(mpt_cmd);
2985 }
2986 }
2987
2988 sc->aen_cmd = NULL;
2989
2990 /* Reset load balance info */
2991 memset(sc->load_balance_info, 0,
2992 sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
2993
2994 if (mrsas_get_ctrl_info(sc)) {
2995 mrsas_kill_hba(sc);
2996 retval = FAIL;
2997 goto out;
2998 }
2999 if (!mrsas_get_map_info(sc))
3000 mrsas_sync_map_info(sc);
3001
3002 megasas_setup_jbod_map(sc);
3003
3004 memset(sc->pd_list, 0,
3005 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3006 if (mrsas_get_pd_list(sc) != SUCCESS) {
3007 device_printf(sc->mrsas_dev, "Get PD list failed from OCR.\n"
3008 "Will get the latest PD LIST after OCR on event.\n");
3009 }
3010 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
3011 if (mrsas_get_ld_list(sc) != SUCCESS) {
3012 device_printf(sc->mrsas_dev, "Get LD lsit failed from OCR.\n"
3013 "Will get the latest LD LIST after OCR on event.\n");
3014 }
3015
3016 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3017 mrsas_enable_intr(sc);
3018 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3019
3020 /* Register AEN with FW for last sequence number */
3021 class_locale.members.reserved = 0;
3022 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3023 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3024
3025 if (mrsas_register_aen(sc, sc->last_seq_num,
3026 class_locale.word)) {
3027 device_printf(sc->mrsas_dev,
3028 "ERROR: AEN registration FAILED from OCR !!! "
3029 "Further events from the controller cannot be notified."
3030 "Either there is some problem in the controller"
3031 "or the controller does not support AEN.\n"
3032 "Please contact to the SUPPORT TEAM if the problem persists\n");
3033 }
3034 /* Adapter reset completed successfully */
3035 device_printf(sc->mrsas_dev, "Reset successful\n");
3036 retval = SUCCESS;
3037 goto out;
3038 }
3039 /* Reset failed, kill the adapter */
3040 device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3041 mrsas_kill_hba(sc);
3042 retval = FAIL;
3043 } else {
3044 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3045 mrsas_enable_intr(sc);
3046 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3047 }
3048out:
3049 mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3050 mrsas_dprint(sc, MRSAS_OCR,
3051 "Reset Exit with %d.\n", retval);
3052 return retval;
3053}
3054
3055/*
3056 * mrsas_kill_hba: Kill HBA when OCR is not supported
3057 * input: Adapter Context.
3058 *
3059 * This function will kill HBA when OCR is not supported.
3060 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/*
	 * Mark the adapter dead first; other paths in this file (e.g.
	 * mrsas_shutdown_ctlr, mrsas_flush_cache) check adprecovery and
	 * bail out once it is MRSAS_HW_CRITICAL_ERROR.
	 */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Give in-flight users a moment to observe the new state. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell firmware to stop the adapter. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	/* Dummy read-back to push the posted doorbell write out. */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Wake/fail any IOCTL issuers still blocked on sync commands. */
	mrsas_complete_outstanding_ioctls(sc);
}
3073
3074/**
3075 * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba
3076 * input: Controller softc
3077 *
3078 * Returns void
3079 */
3080void
3081mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3082{
3083 int i;
3084 struct mrsas_mpt_cmd *cmd_mpt;
3085 struct mrsas_mfi_cmd *cmd_mfi;
3086 u_int32_t count, MSIxIndex;
3087
3088 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3089 for (i = 0; i < sc->max_fw_cmds; i++) {
3090 cmd_mpt = sc->mpt_cmd_list[i];
3091
3092 if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3093 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3094 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3095 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3096 mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3097 cmd_mpt->io_request->RaidContext.status);
3098 }
3099 }
3100 }
3101}
3102
3103/*
3104 * mrsas_wait_for_outstanding: Wait for outstanding commands
3105 * input: Adapter Context.
3106 *
3107 * This function will wait for 180 seconds for outstanding commands to be
3108 * completed.
3109 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;


	/* One iteration per second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		/* Detach/shutdown wins: stop waiting and report "needs reset". */
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		/* DCMD timeout is itself a reason to reset: don't keep waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically drain completions ourselves and log progress. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
		}
		DELAY(1000 * 1000);
	}

	/* Timed out with work still in flight: caller must reset. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3162
3163/*
3164 * mrsas_release_mfi_cmd: Return a cmd to free command pool
3165 * input: Command packet for return to free cmd pool
3166 *
3167 * This function returns the MFI command to the command list.
3168 */
3169void
3170mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
3171{
3172 struct mrsas_softc *sc = cmd->sc;
3173
3174 mtx_lock(&sc->mfi_cmd_pool_lock);
3175 cmd->ccb_ptr = NULL;
3176 cmd->cmd_id.frame_count = 0;
3177 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
3178 mtx_unlock(&sc->mfi_cmd_pool_lock);
3179
3180 return;
3181}
3182
3183/*
3184 * mrsas_get_controller_info: Returns FW's controller structure
3185 * input: Adapter soft state
3186 * Controller information structure
3187 *
3188 * Issues an internal command (DCMD) to get the FW's controller structure. This
3189 * information is mainly used to find out the maximum IO transfer per command
3190 * supported by the FW.
3191 */
3192static int
3193mrsas_get_ctrl_info(struct mrsas_softc *sc)
3194{
3195 int retcode = 0;
3196 u_int8_t do_ocr = 1;
3197 struct mrsas_mfi_cmd *cmd;
3198 struct mrsas_dcmd_frame *dcmd;
3199
3200 cmd = mrsas_get_mfi_cmd(sc);
3201
3202 if (!cmd) {
3203 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3204 return -ENOMEM;
3205 }
3206 dcmd = &cmd->frame->dcmd;
3207
3208 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3209 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3210 mrsas_release_mfi_cmd(cmd);
3211 return -ENOMEM;
3212 }
3213 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3214
3215 dcmd->cmd = MFI_CMD_DCMD;
3216 dcmd->cmd_status = 0xFF;
3217 dcmd->sge_count = 1;
3218 dcmd->flags = MFI_FRAME_DIR_READ;
3219 dcmd->timeout = 0;
3220 dcmd->pad_0 = 0;
3221 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
3222 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
3223 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
3224 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
3225
3226 retcode = mrsas_issue_polled(sc, cmd);
3227 if (retcode == ETIMEDOUT)
3228 goto dcmd_timeout;
3229 else
3230 memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3231
3232 do_ocr = 0;
3233 mrsas_update_ext_vd_details(sc);
3234
3235 sc->use_seqnum_jbod_fp =
3236 sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3237
3238dcmd_timeout:
3239 mrsas_free_ctlr_info_cmd(sc);
3240
3241 if (do_ocr)
3242 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3243 else
3244 mrsas_release_mfi_cmd(cmd);
3245
3246 return (retcode);
3247}
3248
3249/*
3250 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3251 * input:
3252 * sc - Controller's softc
3253*/
3254static void
3255mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3256{
3257 sc->max256vdSupport =
3258 sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3259 /* Below is additional check to address future FW enhancement */
3260 if (sc->ctrl_info->max_lds > 64)
3261 sc->max256vdSupport = 1;
3262
3263 sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3264 * MRSAS_MAX_DEV_PER_CHANNEL;
3265 sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3266 * MRSAS_MAX_DEV_PER_CHANNEL;
3267 if (sc->max256vdSupport) {
3268 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3269 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3270 } else {
3271 sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3272 sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3273 }
3274
3275 sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3276 (sizeof(MR_LD_SPAN_MAP) *
3277 (sc->fw_supported_vd_count - 1));
3278 sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3279 sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3280 (sizeof(MR_LD_SPAN_MAP) *
3281 (sc->drv_supported_vd_count - 1));
3282
3283 sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3284
3285 if (sc->max256vdSupport)
3286 sc->current_map_sz = sc->new_map_sz;
3287 else
3288 sc->current_map_sz = sc->old_map_sz;
3289}
3290
3291/*
3292 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
3293 * input: Adapter soft state
3294 *
3295 * Allocates DMAable memory for the controller info internal command.
3296 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
	int ctlr_info_size;

	/* Allocate get controller info command */
	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
	/* Single-segment, 32-bit-addressable buffer; byte alignment. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ctlr_info_size,
	    1,
	    ctlr_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ctlr_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
		return (ENOMEM);
	}
	/*
	 * NOTE(review): the two error returns below leave earlier
	 * allocations (tag / memory) in place; confirm every caller
	 * eventually runs mrsas_free_ctlr_info_cmd() on failure,
	 * otherwise these paths leak.
	 */
	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
		return (ENOMEM);
	}
	/* mrsas_addr_cb stores the bus address into ctlr_info_phys_addr. */
	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
		return (ENOMEM);
	}
	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
	return (0);
}
3332
3333/*
3334 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
3335 * input: Adapter soft state
3336 *
3337 * Deallocates memory of the get controller info cmd.
3338 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Tear down in reverse order of mrsas_alloc_ctlr_info_cmd(). */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
	/*
	 * NOTE(review): the softc fields are not reset to NULL/0 here, so
	 * calling this twice would operate on stale handles — confirm it
	 * is only ever invoked once per allocation.
	 */
}
3349
3350/*
3351 * mrsas_issue_polled: Issues a polling command
3352 * inputs: Adapter soft state
3353 * Command packet to be issued
3354 *
3355 * This function is for posting of internal commands to Firmware. MFI requires
3356 * the cmd_status to be set to 0xFF before posting. The maximun wait time of
3357 * the poll response timer is 180 seconds.
3358 */
3359int
3360mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3361{
3362 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3363 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3364 int i, retcode = SUCCESS;
3365
3366 frame_hdr->cmd_status = 0xFF;
3367 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3368
3369 /* Issue the frame using inbound queue port */
3370 if (mrsas_issue_dcmd(sc, cmd)) {
3371 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3372 return (1);
3373 }
3374 /*
3375 * Poll response timer to wait for Firmware response. While this
3376 * timer with the DELAY call could block CPU, the time interval for
3377 * this is only 1 millisecond.
3378 */
3379 if (frame_hdr->cmd_status == 0xFF) {
3380 for (i = 0; i < (max_wait * 1000); i++) {
3381 if (frame_hdr->cmd_status == 0xFF)
3382 DELAY(1000);
3383 else
3384 break;
3385 }
3386 }
3387 if (frame_hdr->cmd_status == 0xFF) {
3388 device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3389 "seconds from %s\n", max_wait, __func__);
3390 device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3391 cmd->frame->dcmd.opcode);
3392 retcode = ETIMEDOUT;
3393 }
3394 return (retcode);
3395}
3396
3397/*
3398 * mrsas_issue_dcmd: Issues a MFI Pass thru cmd
3399 * input: Adapter soft state mfi cmd pointer
3400 *
3401 * This function is called by mrsas_issued_blocked_cmd() and
3402 * mrsas_issued_polled(), to build the MPT command and then fire the command
3403 * to Firmware.
3404 */
3405int
3406mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3407{
3408 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3409
3410 req_desc = mrsas_build_mpt_cmd(sc, cmd);
3411 if (!req_desc) {
3412 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3413 return (1);
3414 }
3415 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3416
3417 return (0);
3418}
3419
3420/*
3421 * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd
3422 * input: Adapter soft state mfi cmd to build
3423 *
3424 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3425 * command and prepares the MPT command to send to Firmware.
3426 */
3427MRSAS_REQUEST_DESCRIPTOR_UNION *
3428mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3429{
3430 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3431 u_int16_t index;
3432
3433 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3434 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3435 return NULL;
3436 }
3437 index = cmd->cmd_id.context.smid;
3438
3439 req_desc = mrsas_get_request_desc(sc, index - 1);
3440 if (!req_desc)
3441 return NULL;
3442
3443 req_desc->addr.Words = 0;
3444 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3445
3446 req_desc->SCSIIO.SMID = index;
3447
3448 return (req_desc);
3449}
3450
3451/*
3452 * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command
3453 * input: Adapter soft state mfi cmd pointer
3454 *
3455 * The MPT command and the io_request are setup as a passthru command. The SGE
3456 * chain address is set to frame_phys_addr of the MFI command.
3457 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so completion can find the MFI cmd from the MPT cmd. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* Invader/Fury require the last main-frame SGE's flags zeroed. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* SGLOffset0 is expressed in 32-bit words, hence the divide by 4. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points straight at the MFI frame in DMA memory. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

	return (0);
}
3507
3508/*
3509 * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds
3510 * input: Adapter soft state Command to be issued
3511 *
3512 * This function waits on an event for the command to be returned from the ISR.
3513 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3514 * internal and ioctl commands.
3515 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): this stores the address of the local parameter in
	 * sc->chan, but the actual sleep/wakeup channel used below (and in
	 * mrsas_wakeup()) is &sc->chan itself — the stored value appears
	 * unused.  Confirm before relying on sc->chan's contents.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in 1-second (hz-tick) slices until FW sets cmd_status. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		/* IOCTL (sync_cmd) callers wait indefinitely; others time out. */
		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
3560
3561/*
3562 * mrsas_complete_mptmfi_passthru: Completes a command
3563 * input: @sc: Adapter soft state
3564 * @cmd: Command to be completed
3565 * @status: cmd completion status
3566 *
3567 * This function is called from mrsas_complete_cmd() after an interrupt is
3568 * received from Firmware, and io_request->Function is
3569 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3570 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/* Status byte written by firmware into the MFI frame. */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH — non-IOCTL SCSI frames use the DCMD path below. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Pending map-sync completed: refresh and re-validate. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* NOT_FOUND is benign: just recycle the cmd. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Immediately arm the next map-sync cycle. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {

			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				/* Disable JBOD fast path if the sync fails. */
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3669
3670/*
3671 * mrsas_wakeup: Completes an internal command
3672 * input: Adapter soft state
3673 * Command to be completed
3674 *
3675 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3676 * timer is started. This function is called from
3677 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3678 * from the command wait.
3679 */
3680void
3681mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3682{
3683 cmd->cmd_status = cmd->frame->io.cmd_status;
3684
3685 if (cmd->cmd_status == 0xFF)
3686 cmd->cmd_status = 0;
3687
3688 sc->chan = (void *)&cmd;
3689 wakeup_one((void *)&sc->chan);
3690 return;
3691}
3692
3693/*
3694 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input:
3695 * Adapter soft state Shutdown/Hibernate
3696 *
3697 * This function issues a DCMD internal command to Firmware to initiate shutdown
3698 * of the controller.
3699 */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	/* A dead adapter cannot process a shutdown DCMD. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
		return;
	}
	/* Abort any long-lived pending commands before shutting down. */
	if (sc->aen_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
	if (sc->map_update_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
	if (sc->jbod_seq_cmd)
		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Data-less DCMD: the opcode alone carries the request. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	/* Caller selects shutdown vs. hibernate via opcode. */
	dcmd->opcode = opcode;

	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

	/* Wait for firmware to acknowledge before releasing the cmd. */
	mrsas_issue_blocked_cmd(sc, cmd);
	mrsas_release_mfi_cmd(cmd);

	return;
}
3740
3741/*
3742 * mrsas_flush_cache: Requests FW to flush all its caches input:
3743 * Adapter soft state
3744 *
3745 * This function is issues a DCMD internal command to Firmware to initiate
3746 * flushing of all caches.
3747 */
3748static void
3749mrsas_flush_cache(struct mrsas_softc *sc)
3750{
3751 struct mrsas_mfi_cmd *cmd;
3752 struct mrsas_dcmd_frame *dcmd;
3753
3754 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3755 return;
3756
3757 cmd = mrsas_get_mfi_cmd(sc);
3758 if (!cmd) {
3759 device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3760 return;
3761 }
3762 dcmd = &cmd->frame->dcmd;
3763 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3764
3765 dcmd->cmd = MFI_CMD_DCMD;
3766 dcmd->cmd_status = 0x0;
3767 dcmd->sge_count = 0;
3768 dcmd->flags = MFI_FRAME_DIR_NONE;
3769 dcmd->timeout = 0;
3770 dcmd->pad_0 = 0;
3771 dcmd->data_xfer_len = 0;
3772 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3773 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3774
3775 mrsas_issue_blocked_cmd(sc, cmd);
3776 mrsas_release_mfi_cmd(cmd);
3777
3778 return;
3779}
3780
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Header plus one MR_PD_CFG_SEQ already embedded in the struct. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered JBOD map: slot selected by pd_seq_map_id parity. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = (pd_seq_map_sz);
	dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
	dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

	if (pend) {
		/*
		 * Pending mode: register the cmd as jbod_seq_cmd and fire it
		 * without waiting; completion arrives asynchronously via
		 * mrsas_complete_mptmfi_passthru().
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = (MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			/*
			 * NOTE(review): on this failure path the cmd is
			 * neither released nor cleared from jbod_seq_cmd —
			 * confirm whether this leak is intentional.
			 */
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = MFI_FRAME_DIR_READ;

	/* Synchronous mode: poll for the firmware's PD sequence data. */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		/*
		 * NOTE(review): negative errno here, while other paths in
		 * this function return positive values — callers only test
		 * for non-zero, but confirm the mixed convention is OK.
		 */
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* On timeout keep the cmd (FW may own it) and schedule an OCR. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3859
3860/*
3861 * mrsas_get_map_info: Load and validate RAID map input:
3862 * Adapter instance soft state
3863 *
3864 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3865 * and validate RAID map. It returns 0 if successful, 1 other- wise.
3866 */
3867static int
3868mrsas_get_map_info(struct mrsas_softc *sc)
3869{
3870 uint8_t retcode = 0;
3871
3872 sc->fast_path_io = 0;
3873 if (!mrsas_get_ld_map_info(sc)) {
3874 retcode = MR_ValidateMapInfo(sc);
3875 if (retcode == 0) {
3876 sc->fast_path_io = 1;
3877 return 0;
3878 }
3879 }
3880 return 1;
3881}
3882
3883/*
3884 * mrsas_get_ld_map_info: Get FW's ld_map structure input:
3885 * Adapter instance soft state
3886 *
3887 * Issues an internal command (DCMD) to get the FW's controller PD list
3888 * structure.
3889 */
3890static int
3891mrsas_get_ld_map_info(struct mrsas_softc *sc)
3892{
3893 int retcode = 0;
3894 struct mrsas_mfi_cmd *cmd;
3895 struct mrsas_dcmd_frame *dcmd;
3896 void *map;
3897 bus_addr_t map_phys_addr = 0;
3898
3899 cmd = mrsas_get_mfi_cmd(sc);
3900 if (!cmd) {
3901 device_printf(sc->mrsas_dev,
3902 "Cannot alloc for ld map info cmd.\n");
3903 return 1;
3904 }
3905 dcmd = &cmd->frame->dcmd;
3906
3907 map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3908 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3909 if (!map) {
3910 device_printf(sc->mrsas_dev,
3911 "Failed to alloc mem for ld map info.\n");
3912 mrsas_release_mfi_cmd(cmd);
3913 return (ENOMEM);
3914 }
3915 memset(map, 0, sizeof(sc->max_map_sz));
3916 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3917
3918 dcmd->cmd = MFI_CMD_DCMD;
3919 dcmd->cmd_status = 0xFF;
3920 dcmd->sge_count = 1;
3921 dcmd->flags = MFI_FRAME_DIR_READ;
3922 dcmd->timeout = 0;
3923 dcmd->pad_0 = 0;
3924 dcmd->data_xfer_len = sc->current_map_sz;
3925 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3926 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3927 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3928
3929 retcode = mrsas_issue_polled(sc, cmd);
3930 if (retcode == ETIMEDOUT)
3931 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3932 else
3933 mrsas_release_mfi_cmd(cmd);
3934
3935 return (retcode);
3936}
3937
3938/*
3939 * mrsas_sync_map_info: Get FW's ld_map structure input:
3940 * Adapter instance soft state
3941 *
3942 * Issues an internal command (DCMD) to get the FW's controller PD list
3943 * structure.
3944 */
3945static int
3946mrsas_sync_map_info(struct mrsas_softc *sc)
3947{
3948 int retcode = 0, i;
3949 struct mrsas_mfi_cmd *cmd;
3950 struct mrsas_dcmd_frame *dcmd;
3951 uint32_t size_sync_info, num_lds;
3952 MR_LD_TARGET_SYNC *target_map = NULL;
3953 MR_DRV_RAID_MAP_ALL *map;
3954 MR_LD_RAID *raid;
3955 MR_LD_TARGET_SYNC *ld_sync;
3956 bus_addr_t map_phys_addr = 0;
3957
3958 cmd = mrsas_get_mfi_cmd(sc);
3959 if (!cmd) {
3960 device_printf(sc->mrsas_dev,
3961 "Cannot alloc for sync map info cmd\n");
3962 return 1;
3963 }
3964 map = sc->ld_drv_map[sc->map_id & 1];
3965 num_lds = map->raidMap.ldCount;
3966
3967 dcmd = &cmd->frame->dcmd;
3968 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3969 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3970
3971 target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3972 memset(target_map, 0, sc->max_map_sz);
3973
3974 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3975
3976 ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3977
3978 for (i = 0; i < num_lds; i++, ld_sync++) {
3979 raid = MR_LdRaidGet(i, map);
3980 ld_sync->targetId = MR_GetLDTgtId(i, map);
3981 ld_sync->seqNum = raid->seqNum;
3982 }
3983
3984 dcmd->cmd = MFI_CMD_DCMD;
3985 dcmd->cmd_status = 0xFF;
3986 dcmd->sge_count = 1;
3987 dcmd->flags = MFI_FRAME_DIR_WRITE;
3988 dcmd->timeout = 0;
3989 dcmd->pad_0 = 0;
3990 dcmd->data_xfer_len = sc->current_map_sz;
3991 dcmd->mbox.b[0] = num_lds;
3992 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3993 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3994 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3995 dcmd->sgl.sge32[0].length = sc->current_map_sz;
3996
3997 sc->map_update_cmd = cmd;
3998 if (mrsas_issue_dcmd(sc, cmd)) {
3999 device_printf(sc->mrsas_dev,
4000 "Fail to send sync map info command.\n");
4001 return (1);
4002 }
4003 return (retcode);
4004}
4005
4006/*
4007 * mrsas_get_pd_list: Returns FW's PD list structure input:
4008 * Adapter soft state
4009 *
4010 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out the system PDs
 * supported by the Firmware.
4013 */
4014static int
4015mrsas_get_pd_list(struct mrsas_softc *sc)
4016{
4017 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4018 u_int8_t do_ocr = 1;
4019 struct mrsas_mfi_cmd *cmd;
4020 struct mrsas_dcmd_frame *dcmd;
4021 struct MR_PD_LIST *pd_list_mem;
4022 struct MR_PD_ADDRESS *pd_addr;
4023 bus_addr_t pd_list_phys_addr = 0;
4024 struct mrsas_tmp_dcmd *tcmd;
4025
4026 cmd = mrsas_get_mfi_cmd(sc);
4027 if (!cmd) {
4028 device_printf(sc->mrsas_dev,
4029 "Cannot alloc for get PD list cmd\n");
4030 return 1;
4031 }
4032 dcmd = &cmd->frame->dcmd;
4033
4034 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4035 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4036 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4037 device_printf(sc->mrsas_dev,
4038 "Cannot alloc dmamap for get PD list cmd\n");
4039 mrsas_release_mfi_cmd(cmd);
4040 mrsas_free_tmp_dcmd(tcmd);
4041 free(tcmd, M_MRSAS);
4042 return (ENOMEM);
4043 } else {
4044 pd_list_mem = tcmd->tmp_dcmd_mem;
4045 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4046 }
4047 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4048
4049 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4050 dcmd->mbox.b[1] = 0;
4051 dcmd->cmd = MFI_CMD_DCMD;
4052 dcmd->cmd_status = 0xFF;
4053 dcmd->sge_count = 1;
4054 dcmd->flags = MFI_FRAME_DIR_READ;
4055 dcmd->timeout = 0;
4056 dcmd->pad_0 = 0;
4057 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4058 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
4059 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
4060 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4061
4062 retcode = mrsas_issue_polled(sc, cmd);
4063 if (retcode == ETIMEDOUT)
4064 goto dcmd_timeout;
4065
4066 /* Get the instance PD list */
4067 pd_count = MRSAS_MAX_PD;
4068 pd_addr = pd_list_mem->addr;
4069 if (pd_list_mem->count < pd_count) {
4070 memset(sc->local_pd_list, 0,
4071 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4072 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
4073 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
4074 sc->local_pd_list[pd_addr->deviceId].driveType =
4075 pd_addr->scsiDevType;
4076 sc->local_pd_list[pd_addr->deviceId].driveState =
4077 MR_PD_STATE_SYSTEM;
4078 pd_addr++;
4079 }
4080 /*
4081 * Use mutext/spinlock if pd_list component size increase more than
4082 * 32 bit.
4083 */
4084 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4085 do_ocr = 0;
4086 }
4087dcmd_timeout:
4088 mrsas_free_tmp_dcmd(tcmd);
4089 free(tcmd, M_MRSAS);
4090
4091 if (do_ocr)
4092 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4093 else
4094 mrsas_release_mfi_cmd(cmd);
4095
4096 return (retcode);
4097}
4098
4099/*
4100 * mrsas_get_ld_list: Returns FW's LD list structure input:
4101 * Adapter soft state
4102 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure. This information is mainly used to find out the LDs supported
 * by the FW.
4106 */
4107static int
4108mrsas_get_ld_list(struct mrsas_softc *sc)
4109{
4110 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
4111 u_int8_t do_ocr = 1;
4112 struct mrsas_mfi_cmd *cmd;
4113 struct mrsas_dcmd_frame *dcmd;
4114 struct MR_LD_LIST *ld_list_mem;
4115 bus_addr_t ld_list_phys_addr = 0;
4116 struct mrsas_tmp_dcmd *tcmd;
4117
4118 cmd = mrsas_get_mfi_cmd(sc);
4119 if (!cmd) {
4120 device_printf(sc->mrsas_dev,
4121 "Cannot alloc for get LD list cmd\n");
4122 return 1;
4123 }
4124 dcmd = &cmd->frame->dcmd;
4125
4126 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4127 ld_list_size = sizeof(struct MR_LD_LIST);
4128 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4129 device_printf(sc->mrsas_dev,
4130 "Cannot alloc dmamap for get LD list cmd\n");
4131 mrsas_release_mfi_cmd(cmd);
4132 mrsas_free_tmp_dcmd(tcmd);
4133 free(tcmd, M_MRSAS);
4134 return (ENOMEM);
4135 } else {
4136 ld_list_mem = tcmd->tmp_dcmd_mem;
4137 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4138 }
4139 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4140
4141 if (sc->max256vdSupport)
4142 dcmd->mbox.b[0] = 1;
4143
4144 dcmd->cmd = MFI_CMD_DCMD;
4145 dcmd->cmd_status = 0xFF;
4146 dcmd->sge_count = 1;
4147 dcmd->flags = MFI_FRAME_DIR_READ;
4148 dcmd->timeout = 0;
4149 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
4150 dcmd->opcode = MR_DCMD_LD_GET_LIST;
4151 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
4152 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
4153 dcmd->pad_0 = 0;
4154
4155 retcode = mrsas_issue_polled(sc, cmd);
4156 if (retcode == ETIMEDOUT)
4157 goto dcmd_timeout;
4158
4159#if VD_EXT_DEBUG
4160 printf("Number of LDs %d\n", ld_list_mem->ldCount);
4161#endif
4162
4163 /* Get the instance LD list */
4164 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
4165 sc->CurLdCount = ld_list_mem->ldCount;
4166 memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4167 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
4168 if (ld_list_mem->ldList[ld_index].state != 0) {
4169 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4170 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4171 }
4172 }
4173 do_ocr = 0;
4174 }
4175dcmd_timeout:
4176 mrsas_free_tmp_dcmd(tcmd);
4177 free(tcmd, M_MRSAS);
4178
4179 if (do_ocr)
4180 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4181 else
4182 mrsas_release_mfi_cmd(cmd);
4183
4184 return (retcode);
4185}
4186
4187/*
4188 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input:
 * Adapter soft state Temp command Size of allocation
4190 *
4191 * Allocates DMAable memory for a temporary internal command. The allocated
4192 * memory is initialized to all zeros upon successful loading of the dma
4193 * mapped memory.
4194 */
4195int
4196mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4197 struct mrsas_tmp_dcmd *tcmd, int size)
4198{
4199 if (bus_dma_tag_create(sc->mrsas_parent_tag,
4200 1, 0,
4201 BUS_SPACE_MAXADDR_32BIT,
4202 BUS_SPACE_MAXADDR,
4203 NULL, NULL,
4204 size,
4205 1,
4206 size,
4207 BUS_DMA_ALLOCNOW,
4208 NULL, NULL,
4209 &tcmd->tmp_dcmd_tag)) {
4210 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4211 return (ENOMEM);
4212 }
4213 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4214 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4215 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4216 return (ENOMEM);
4217 }
4218 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4219 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4220 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4221 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4222 return (ENOMEM);
4223 }
4224 memset(tcmd->tmp_dcmd_mem, 0, size);
4225 return (0);
4226}
4227
4228/*
4229 * mrsas_free_tmp_dcmd: Free memory for temporary command input:
4230 * temporary dcmd pointer
4231 *
4232 * Deallocates memory of the temporary command for use in the construction of
4233 * the internal DCMD.
4234 */
4235void
4236mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4237{
4238 if (tmp->tmp_dcmd_phys_addr)
4239 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4240 if (tmp->tmp_dcmd_mem != NULL)
4241 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4242 if (tmp->tmp_dcmd_tag != NULL)
4243 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4244}
4245
4246/*
4247 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input:
4248 * Adapter soft state Previously issued cmd to be aborted
4249 *
4250 * This function is used to abort previously issued commands, such as AEN and
4251 * RAID map sync map commands. The abort command is sent as a DCMD internal
4252 * command and subsequently the driver will wait for a return status. The
4253 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4254 */
4255static int
4256mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4257 struct mrsas_mfi_cmd *cmd_to_abort)
4258{
4259 struct mrsas_mfi_cmd *cmd;
4260 struct mrsas_abort_frame *abort_fr;
4261 u_int8_t retcode = 0;
4262 unsigned long total_time = 0;
4263 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4264
4265 cmd = mrsas_get_mfi_cmd(sc);
4266 if (!cmd) {
4267 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4268 return (1);
4269 }
4270 abort_fr = &cmd->frame->abort;
4271
4272 /* Prepare and issue the abort frame */
4273 abort_fr->cmd = MFI_CMD_ABORT;
4274 abort_fr->cmd_status = 0xFF;
4275 abort_fr->flags = 0;
4276 abort_fr->abort_context = cmd_to_abort->index;
4277 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4278 abort_fr->abort_mfi_phys_addr_hi = 0;
4279
4280 cmd->sync_cmd = 1;
4281 cmd->cmd_status = 0xFF;
4282
4283 if (mrsas_issue_dcmd(sc, cmd)) {
4284 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4285 return (1);
4286 }
4287 /* Wait for this cmd to complete */
4288 sc->chan = (void *)&cmd;
4289 while (1) {
4290 if (cmd->cmd_status == 0xFF) {
4291 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4292 } else
4293 break;
4294 total_time++;
4295 if (total_time >= max_wait) {
4296 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4297 retcode = 1;
4298 break;
4299 }
4300 }
4301
4302 cmd->sync_cmd = 0;
4303 mrsas_release_mfi_cmd(cmd);
4304 return (retcode);
4305}
4306
4307/*
4308 * mrsas_complete_abort: Completes aborting a command input:
4309 * Adapter soft state Cmd that was issued to abort another cmd
4310 *
4311 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4312 * change after sending the command. This function is called from
4313 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4314 */
void
mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Only wake the waiter if this was a blocked (sync) abort. */
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;	/* clears the 0xFF "pending" marker */
		/*
		 * NOTE(review): this stores the address of this function's
		 * own local 'cmd' parameter; the actual sleep channel used
		 * by both sides is &sc->chan itself, so the stored value
		 * appears unused — confirm before relying on sc->chan.
		 */
		sc->chan = (void *)&cmd;
		/* Wake the thread blocked in mrsas_issue_blocked_abort_cmd(). */
		wakeup_one((void *)&sc->chan);
	}
	return;
}
4326
4327/*
4328 * mrsas_aen_handler: AEN processing callback function from thread context
4329 * input: Adapter soft state
4330 *
4331 * Asynchronous event handler
4332 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Dispatch on the event code of the last AEN delivered by the FW. */
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		/* PD hotplug: refresh the PD list, then rescan sim_1 (PDs). */
		case MR_EVT_PD_INSERTED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				/* list refresh failed: skip re-registering AEN */
				goto skip_register_aen;
			doscan = 0;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			doscan = 0;
			break;
		/* LD went away: rescan sim_0 (LDs) without a list refresh. */
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			mrsas_bus_scan_sim(sc, sc->sim_0);
			doscan = 0;
			break;
		/* New LD: refresh the LD list, then rescan sim_0. */
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			doscan = 0;
			break;
		/* Broad configuration changes: rescan both buses below. */
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			doscan = 1;
			break;
		default:
			doscan = 0;
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	/* Full rescan path: refresh PD and LD lists, rescan both sims. */
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; don't register a second one. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
4427
4428
4429/*
4430 * mrsas_complete_aen: Completes AEN command
4431 * input: Adapter soft state
4432 * Cmd that was issued to abort another cmd
4433 *
4434 * This function will be called from ISR and will continue event processing from
4435 * thread context by enqueuing task in ev_tq (callback function
4436 * "mrsas_aen_handler").
4437 */
4438void
4439mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4440{
4441 /*
4442 * Don't signal app if it is just an aborted previously registered
4443 * aen
4444 */
4445 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4446 sc->mrsas_aen_triggered = 1;
4447 mtx_lock(&sc->aen_lock);
4448 if (sc->mrsas_poll_waiting) {
4449 sc->mrsas_poll_waiting = 0;
4450 selwakeup(&sc->mrsas_select);
4451 }
4452 mtx_unlock(&sc->aen_lock);
4453 } else
4454 cmd->abort_aen = 0;
4455
4456 sc->aen_cmd = NULL;
4457 mrsas_release_mfi_cmd(cmd);
4458
4459 if (!sc->remove_in_progress)
4460 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4461
4462 return;
4463}
4464
/* newbus method table: device lifecycle entry points for the mrsas driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* table terminator */
};
4475
/* Driver declaration: name, method table, and per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register with the PCI bus; the driver requires the CAM subsystem. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);