1/*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
34 *
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37 *
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD$");
42
43#include <dev/mrsas/mrsas.h>
44#include <dev/mrsas/mrsas_ioctl.h>
45
46#include <cam/cam.h>
47#include <cam/cam_ccb.h>
48
49#include <sys/sysctl.h>
50#include <sys/types.h>
51#include <sys/sysent.h>
52#include <sys/kthread.h>
53#include <sys/taskqueue.h>
54#include <sys/smp.h>
55#include <sys/endian.h>
56
57/*
58 * Function prototypes
59 */
60static d_open_t mrsas_open;
61static d_close_t mrsas_close;
62static d_read_t mrsas_read;
63static d_write_t mrsas_write;
64static d_ioctl_t mrsas_ioctl;
65static d_poll_t mrsas_poll;
66
67static void mrsas_ich_startup(void *arg);
68static struct mrsas_mgmt_info mrsas_mgmt_info;
69static struct mrsas_ident *mrsas_find_ident(device_t);
70static int mrsas_setup_msix(struct mrsas_softc *sc);
71static int mrsas_allocate_msix(struct mrsas_softc *sc);
72static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
73static void mrsas_flush_cache(struct mrsas_softc *sc);
74static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
75static void mrsas_ocr_thread(void *arg);
76static int mrsas_get_map_info(struct mrsas_softc *sc);
77static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
78static int mrsas_sync_map_info(struct mrsas_softc *sc);
79static int mrsas_get_pd_list(struct mrsas_softc *sc);
80static int mrsas_get_ld_list(struct mrsas_softc *sc);
81static int mrsas_setup_irq(struct mrsas_softc *sc);
82static int mrsas_alloc_mem(struct mrsas_softc *sc);
83static int mrsas_init_fw(struct mrsas_softc *sc);
84static int mrsas_setup_raidmap(struct mrsas_softc *sc);
85static void megasas_setup_jbod_map(struct mrsas_softc *sc);
86static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
87static int mrsas_clear_intr(struct mrsas_softc *sc);
88static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
89static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
90static int
91mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
92    struct mrsas_mfi_cmd *cmd_to_abort);
93static void
94mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
95static struct mrsas_softc *
96mrsas_get_softc_instance(struct cdev *dev,
97    u_long cmd, caddr_t arg);
98u_int32_t
99mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
100u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
101u_int8_t
102mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
103    struct mrsas_mfi_cmd *mfi_cmd);
104void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
105int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
106int	mrsas_init_adapter(struct mrsas_softc *sc);
107int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
108int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
109int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
110int	mrsas_ioc_init(struct mrsas_softc *sc);
111int	mrsas_bus_scan(struct mrsas_softc *sc);
112int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
113int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
114int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
115int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
116int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
117int mrsas_reset_targets(struct mrsas_softc *sc);
118int
119mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
120    struct mrsas_mfi_cmd *cmd);
121int
122mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
123    int size);
124void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
125void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
126void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
127void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
128void	mrsas_disable_intr(struct mrsas_softc *sc);
129void	mrsas_enable_intr(struct mrsas_softc *sc);
130void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
131void	mrsas_free_mem(struct mrsas_softc *sc);
132void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
133void	mrsas_isr(void *arg);
134void	mrsas_teardown_intr(struct mrsas_softc *sc);
135void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
136void	mrsas_kill_hba(struct mrsas_softc *sc);
137void	mrsas_aen_handler(struct mrsas_softc *sc);
138void
139mrsas_write_reg(struct mrsas_softc *sc, int offset,
140    u_int32_t value);
141void
142mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
143    u_int32_t req_desc_hi);
144void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
145void
146mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
147    struct mrsas_mfi_cmd *cmd, u_int8_t status);
148struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
149
150MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
151        (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
152
153extern int mrsas_cam_attach(struct mrsas_softc *sc);
154extern void mrsas_cam_detach(struct mrsas_softc *sc);
155extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
156extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
157extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
158extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
159extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
160extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
161extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
162extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
163extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
164extern void mrsas_xpt_release(struct mrsas_softc *sc);
165extern MRSAS_REQUEST_DESCRIPTOR_UNION *
166mrsas_get_request_desc(struct mrsas_softc *sc,
167    u_int16_t index);
168extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
169static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
170static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
171void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
172
173void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
174	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
175	u_int32_t data_length, u_int8_t *sense);
176void
177mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
178    u_int32_t req_desc_hi);
179
180SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
181    "MRSAS Driver Parameters");
182
183/*
184 * PCI device struct and table
185 *
186 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* PCI subsystem vendor; 0xffff matches any (see mrsas_find_ident) */
	uint16_t subdevice;	/* PCI subsystem device; 0xffff matches any (see mrsas_find_ident) */
	const char *desc;	/* human-readable controller name for device_set_desc() */
}	MRSAS_CTLR_ID;
194
/*
 * Table of PCI IDs claimed by this driver.  The 0xffff subvendor/subdevice
 * entries act as wildcards during matching; the all-zero entry terminates
 * the table.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}	/* terminator */
};
219
220/*
221 * Character device entry points
222 *
223 */
/* Dispatch table for the /dev/mrsas<unit> control device. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
234
235MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
236
237/*
238 * In the cdevsw routines, we find our softc by using the si_drv1 member of
239 * struct cdev.  We set this variable to point to our softc in our attach
240 * routine when we create the /dev entry.
241 */
242int
243mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
244{
245	struct mrsas_softc *sc;
246
247	sc = dev->si_drv1;
248	return (0);
249}
250
251int
252mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
253{
254	struct mrsas_softc *sc;
255
256	sc = dev->si_drv1;
257	return (0);
258}
259
260int
261mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
262{
263	struct mrsas_softc *sc;
264
265	sc = dev->si_drv1;
266	return (0);
267}
268int
269mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
270{
271	struct mrsas_softc *sc;
272
273	sc = dev->si_drv1;
274	return (0);
275}
276
277u_int32_t
278mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
279{
280	u_int32_t i = 0, ret_val;
281
282	if (sc->is_aero) {
283		do {
284			ret_val = mrsas_read_reg(sc, offset);
285			i++;
286		} while(ret_val == 0 && i < 3);
287	} else
288		ret_val = mrsas_read_reg(sc, offset);
289
290	return ret_val;
291}
292
293/*
294 * Register Read/Write Functions
295 *
296 */
297void
298mrsas_write_reg(struct mrsas_softc *sc, int offset,
299    u_int32_t value)
300{
301	bus_space_tag_t bus_tag = sc->bus_tag;
302	bus_space_handle_t bus_handle = sc->bus_handle;
303
304	bus_space_write_4(bus_tag, bus_handle, offset, value);
305}
306
307u_int32_t
308mrsas_read_reg(struct mrsas_softc *sc, int offset)
309{
310	bus_space_tag_t bus_tag = sc->bus_tag;
311	bus_space_handle_t bus_handle = sc->bus_handle;
312
313	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
314}
315
316/*
317 * Interrupt Disable/Enable/Clear Functions
318 *
319 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;	/* mask every interrupt source */
	u_int32_t status;

	/* Flag consulted elsewhere in the driver: interrupts now masked. */
	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
331
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	/* Flag consulted elsewhere in the driver: interrupts now unmasked. */
	sc->mask_interrupts = 0;
	/* Write all-ones to the status register (presumably write-1-to-clear
	 * any pending interrupt) before unmasking; the read-back flushes the
	 * posted PCI write. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Unmask only the reply-interrupt bit; the read-back flushes again. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}
345
346static int
347mrsas_clear_intr(struct mrsas_softc *sc)
348{
349	u_int32_t status;
350
351	/* Read received interrupt */
352	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
353
354	/* Not our interrupt, so just return */
355	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
356		return (0);
357
358	/* We got a reply interrupt */
359	return (1);
360}
361
362/*
363 * PCI Support Functions
364 *
365 */
366static struct mrsas_ident *
367mrsas_find_ident(device_t dev)
368{
369	struct mrsas_ident *pci_device;
370
371	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
372		if ((pci_device->vendor == pci_get_vendor(dev)) &&
373		    (pci_device->device == pci_get_device(dev)) &&
374		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
375		    (pci_device->subvendor == 0xffff)) &&
376		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
377		    (pci_device->subdevice == 0xffff)))
378			return (pci_device);
379	}
380	return (NULL);
381}
382
/*
 * mrsas_probe: newbus probe entry point.  Claims devices found in
 * device_table and sets the device description; returns ENXIO for
 * devices we do not recognize.
 */
static int
mrsas_probe(device_t dev)
{
	/* Print the driver version banner only for the first controller. */
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}
401
402/*
403 * mrsas_setup_sysctl:	setup sysctl values for mrsas
404 * input:				Adapter instance soft state
405 *
406 * Setup sysctl entries for mrsas driver.
407 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the newbus-provided sysctl context/tree for this device. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a private tree under hw.mrsas.<unit> if none exists. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	/* Per-controller tunables and counters exposed to userland. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
489
490/*
491 * mrsas_get_tunables:	get tunable parameters.
492 * input:				Adapter instance soft state
493 *
494 * Get tunable parameters. This will help to debug driver at boot time.
495 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global lb_pending_cmds tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables (override the global debug level). */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
526
527/*
528 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
529 * Used to get sequence number at driver load time.
530 * input:		Adapter soft state
531 *
532 * Allocates DMAable memory for the event log info internal command.
533 */
534int
535mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
536{
537	int el_info_size;
538
539	/* Allocate get event log info command */
540	el_info_size = sizeof(struct mrsas_evt_log_info);
541	if (bus_dma_tag_create(sc->mrsas_parent_tag,
542	    1, 0,
543	    BUS_SPACE_MAXADDR_32BIT,
544	    BUS_SPACE_MAXADDR,
545	    NULL, NULL,
546	    el_info_size,
547	    1,
548	    el_info_size,
549	    BUS_DMA_ALLOCNOW,
550	    NULL, NULL,
551	    &sc->el_info_tag)) {
552		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
553		return (ENOMEM);
554	}
555	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
556	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
557		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
558		return (ENOMEM);
559	}
560	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
561	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
562	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
563		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
564		return (ENOMEM);
565	}
566	memset(sc->el_info_mem, 0, el_info_size);
567	return (0);
568}
569
570/*
571 * mrsas_free_evt_info_cmd:	Free memory for Event log info command
572 * input:					Adapter soft state
573 *
574 * Deallocates memory for the event log info internal command.
575 */
576void
577mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
578{
579	if (sc->el_info_phys_addr)
580		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
581	if (sc->el_info_mem != NULL)
582		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
583	if (sc->el_info_tag != NULL)
584		bus_dma_tag_destroy(sc->el_info_tag);
585}
586
587/*
588 *  mrsas_get_seq_num:	Get latest event sequence number
589 *  @sc:				Adapter soft state
590 *  @eli:				Firmware event log sequence number information.
591 *
592 * Firmware maintains a log of all events in a non-volatile area.
593 * Driver get the sequence number using DCMD
594 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
595 */
596
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware writes the event log info into. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame (single 32-bit SGE,
	 * firmware-to-host transfer); multi-byte fields are little-endian. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/* On timeout the cmd and DMA buffer are deliberately NOT released --
	 * the firmware may still own them -- and an OCR is requested instead. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
650
651/*
652 *  mrsas_register_aen:		Register for asynchronous event notification
653 *  @sc:			Adapter soft state
654 *  @seq_num:			Starting sequence number
655 *  @class_locale:		Class of the event
656 *
657 *  This function subscribes for events beyond the @seq_num
658 *  and type @class_locale.
659 *
660 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] of the outstanding AEN holds its class/locale word. */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, lowest class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Clear the DMA buffer the firmware will write the event detail into. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* MR_DCMD_CTRL_EVENT_WAIT frame: mbox.w[0] = starting sequence number,
	 * mbox.w[1] = class/locale word; fields are little-endian. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/* NOTE(review): re-checked here in case another AEN was registered
	 * while this frame was being built -- looks like a race guard; if one
	 * appeared, drop our frame and keep the existing registration. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
767
768/*
769 * mrsas_start_aen:	Subscribes to AEN during driver load time
770 * @instance:		Adapter soft state
771 */
772static int
773mrsas_start_aen(struct mrsas_softc *sc)
774{
775	struct mrsas_evt_log_info eli;
776	union mrsas_evt_class_locale class_locale;
777
778	/* Get the latest sequence number from FW */
779
780	memset(&eli, 0, sizeof(eli));
781
782	if (mrsas_get_seq_num(sc, &eli))
783		return -1;
784
785	/* Register AEN with FW for latest sequence number plus 1 */
786	class_locale.members.reserved = 0;
787	class_locale.members.locale = MR_EVT_LOCALE_ALL;
788	class_locale.members.class = MR_EVT_CLASS_DEBUG;
789
790	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
791	    class_locale.word);
792
793}
794
795/*
796 * mrsas_setup_msix:	Allocate MSI-x vectors
797 * @sc:					adapter soft state
798 */
799static int
800mrsas_setup_msix(struct mrsas_softc *sc)
801{
802	int i;
803
804	for (i = 0; i < sc->msix_vectors; i++) {
805		sc->irq_context[i].sc = sc;
806		sc->irq_context[i].MSIxIndex = i;
807		sc->irq_id[i] = i + 1;
808		sc->mrsas_irq[i] = bus_alloc_resource_any
809		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
810		    ,RF_ACTIVE);
811		if (sc->mrsas_irq[i] == NULL) {
812			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
813			goto irq_alloc_failed;
814		}
815		if (bus_setup_intr(sc->mrsas_dev,
816		    sc->mrsas_irq[i],
817		    INTR_MPSAFE | INTR_TYPE_CAM,
818		    NULL, mrsas_isr, &sc->irq_context[i],
819		    &sc->intr_handle[i])) {
820			device_printf(sc->mrsas_dev,
821			    "Cannot set up MSI-x interrupt handler\n");
822			goto irq_alloc_failed;
823		}
824	}
825	return SUCCESS;
826
827irq_alloc_failed:
828	mrsas_teardown_intr(sc);
829	return (FAIL);
830}
831
832/*
833 * mrsas_allocate_msix:		Setup MSI-x vectors
834 * @sc:						adapter soft state
835 */
836static int
837mrsas_allocate_msix(struct mrsas_softc *sc)
838{
839	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
840		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
841		    " of vectors\n", sc->msix_vectors);
842	} else {
843		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
844		goto irq_alloc_failed;
845	}
846	return SUCCESS;
847
848irq_alloc_failed:
849	mrsas_teardown_intr(sc);
850	return (FAIL);
851}
852
853/*
854 * mrsas_attach:	PCI entry point
855 * input:			pointer to device struct
856 *
857 * Performs setup of PCI and registers, initializes mutexes and linked lists,
858 * registers interrupts and CAM, and initializes   the adapter/controller to
859 * its proper state.
860 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	/* Start from a clean softc; every field used below is set explicitly. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Classify the controller generation from its PCI device ID. */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH: configurable-secure parts are driven as Aero too */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		device_printf(dev, "Adapter is in non-secure mode\n");
		/*
		 * NOTE(review): non-secure Aero parts are rejected, yet attach
		 * returns SUCCESS, leaving the device claimed but entirely
		 * uninitialized -- confirm this is the intended behavior.
		 */
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes.  ioctl_lock is a spin mutex: mrsas_ioctl() takes
	 * it only around a short reset_in_progress check. */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize the MPT and MFI command free lists */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	/* Kernel thread that drives online controller reset (OCR). */
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: each label undoes what was acquired before its goto. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
1016
1017/*
1018 * Interrupt config hook
1019 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting semaphore that bounds the number of
	 * concurrent passthrough IOCTLs (consumed in mrsas_ioctl()).
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 additionally exposes the Linux megaraid_sas compat node. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications.  The first unit resets the
	 * whole table before registering itself.
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
			sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Releasing the intrhook lets the boot process continue. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
1080
1081/*
1082 * mrsas_detach:	De-allocates and teardown resources
1083 * input:			pointer to device struct
1084 *
1085 * This function is the entry point for device disconnect and detach.
1086 * It performs memory de-allocations, shutdown of the controller and various
1087 * teardown and destroy resource functions.
1088 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Signal every other path (IOCTL, OCR thread) that we are going away. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (unbounded) for any in-flight OCR to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Quiesce the controller before tearing anything down. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	/* Per-LD stream-detection state exists only on Ventura/Aero. */
	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1176
1177static int
1178mrsas_shutdown(device_t dev)
1179{
1180	struct mrsas_softc *sc;
1181	int i;
1182
1183	sc = device_get_softc(dev);
1184	sc->remove_in_progress = 1;
1185	if (!KERNEL_PANICKED()) {
1186		if (sc->ocr_thread_active)
1187			wakeup(&sc->ocr_chan);
1188		i = 0;
1189		while (sc->reset_in_progress && i < 15) {
1190			i++;
1191			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
1192				mrsas_dprint(sc, MRSAS_INFO,
1193				    "[%2d]waiting for OCR to be finished "
1194				    "from %s\n", i, __func__);
1195			}
1196			pause("mr_shutdown", hz);
1197		}
1198		if (sc->reset_in_progress) {
1199			mrsas_dprint(sc, MRSAS_INFO,
1200			    "gave up waiting for OCR to be finished\n");
1201		}
1202	}
1203
1204	mrsas_flush_cache(sc);
1205	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
1206	mrsas_disable_intr(sc);
1207	return (0);
1208}
1209
1210/*
1211 * mrsas_free_mem:		Frees allocated memory
1212 * input:				Adapter instance soft state
1213 *
1214 * This function is called from mrsas_detach() to free previously allocated
1215 * memory.
1216 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Every DMA buffer below is released in the same order: unload the
	 * busdma map, free the memory, then destroy the tag.  Each step is
	 * guarded, so this is safe to call from a partially failed attach.
	 */

	/*
	 * Free RAID map memory (two buffers: active and pending map)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/*
	 * Free JBOD map memory (same double-buffer scheme as the RAID map)
	 */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames (the DMA frames are released before the command
	 * structures themselves, which are freed further below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (last: all child tags are gone by now)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1371
1372/*
1373 * mrsas_teardown_intr:	Teardown interrupt
1374 * input:				Adapter instance soft state
1375 *
1376 * This function is called from mrsas_detach() to teardown and release bus
1377 * interrupt resourse.
1378 */
1379void
1380mrsas_teardown_intr(struct mrsas_softc *sc)
1381{
1382	int i;
1383
1384	if (!sc->msix_enable) {
1385		if (sc->intr_handle[0])
1386			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1387		if (sc->mrsas_irq[0] != NULL)
1388			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1389			    sc->irq_id[0], sc->mrsas_irq[0]);
1390		sc->intr_handle[0] = NULL;
1391	} else {
1392		for (i = 0; i < sc->msix_vectors; i++) {
1393			if (sc->intr_handle[i])
1394				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1395				    sc->intr_handle[i]);
1396
1397			if (sc->mrsas_irq[i] != NULL)
1398				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1399				    sc->irq_id[i], sc->mrsas_irq[i]);
1400
1401			sc->intr_handle[i] = NULL;
1402		}
1403		pci_release_msi(sc->mrsas_dev);
1404	}
1405
1406}
1407
1408/*
1409 * mrsas_suspend:	Suspend entry point
1410 * input:			Device struct pointer
1411 *
1412 * This function is the entry point for system suspend from the OS.
1413 */
1414static int
1415mrsas_suspend(device_t dev)
1416{
1417	/* This will be filled when the driver will have hibernation support */
1418	return (0);
1419}
1420
1421/*
1422 * mrsas_resume:	Resume entry point
1423 * input:			Device struct pointer
1424 *
1425 * This function is the entry point for system resume from the OS.
1426 */
1427static int
1428mrsas_resume(device_t dev)
1429{
1430	/* This will be filled when the driver will have hibernation support */
1431	return (0);
1432}
1433
1434/**
1435 * mrsas_get_softc_instance:    Find softc instance based on cmd type
1436 *
1437 * This function will return softc instance based on cmd type.
1438 * In some case, application fire ioctl on required management instance and
1439 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1440 * case, else get the softc instance from host_no provided by application in
1441 * user data.
1442 */
1443
1444static struct mrsas_softc *
1445mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1446{
1447	struct mrsas_softc *sc = NULL;
1448	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1449
1450	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1451		sc = dev->si_drv1;
1452	} else {
1453		/*
1454		 * get the Host number & the softc from data sent by the
1455		 * Application
1456		 */
1457		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1458		if (sc == NULL)
1459			printf("There is no Controller number %d\n",
1460			    user_ioc->host_no);
1461		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1462			mrsas_dprint(sc, MRSAS_FAULT,
1463			    "Invalid Controller number %d\n", user_ioc->host_no);
1464	}
1465
1466	return sc;
1467}
1468
1469/*
1470 * mrsas_ioctl:	IOCtl commands entry point.
1471 *
1472 * This function is the entry point for IOCtls from the OS.  It calls the
1473 * appropriate function for processing depending on the command received.
1474 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the softc from the cdev or from host_no in the packet. */
	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Reject IOCTLs during detach or after an unrecoverable HW fault. */
	if (sc->remove_in_progress ||
		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
			"HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/*
	 * The spin lock guards only the reset_in_progress check; if an OCR
	 * is running we drop the lock and poll until it completes.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location back to userland. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1553
1554/*
1555 * mrsas_poll:	poll entry point for mrsas driver fd
1556 *
1557 * This function is the entry point for poll from the OS.  It waits for some AEN
1558 * events to be triggered from the controller and notifies back.
1559 */
1560static int
1561mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1562{
1563	struct mrsas_softc *sc;
1564	int revents = 0;
1565
1566	sc = dev->si_drv1;
1567
1568	if (poll_events & (POLLIN | POLLRDNORM)) {
1569		if (sc->mrsas_aen_triggered) {
1570			revents |= poll_events & (POLLIN | POLLRDNORM);
1571		}
1572	}
1573	if (revents == 0) {
1574		if (poll_events & (POLLIN | POLLRDNORM)) {
1575			mtx_lock(&sc->aen_lock);
1576			sc->mrsas_poll_waiting = 1;
1577			selrecord(td, &sc->mrsas_select);
1578			mtx_unlock(&sc->aen_lock);
1579		}
1580	}
1581	return revents;
1582}
1583
1584/*
1585 * mrsas_setup_irq:	Set up interrupt
1586 * input:			Adapter instance soft state
1587 *
1588 * This function sets up interrupts as a bus resource, with flags indicating
1589 * resource permitting contemporaneous sharing and for resource to activate
1590 * atomically.
1591 */
1592static int
1593mrsas_setup_irq(struct mrsas_softc *sc)
1594{
1595	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1596		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1597
1598	else {
1599		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1600		sc->irq_context[0].sc = sc;
1601		sc->irq_context[0].MSIxIndex = 0;
1602		sc->irq_id[0] = 0;
1603		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1604		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1605		if (sc->mrsas_irq[0] == NULL) {
1606			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1607			    "interrupt\n");
1608			return (FAIL);
1609		}
1610		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1611		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1612		    &sc->irq_context[0], &sc->intr_handle[0])) {
1613			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1614			    "interrupt\n");
1615			return (FAIL);
1616		}
1617	}
1618	return (0);
1619}
1620
1621/*
1622 * mrsas_isr:	ISR entry point
1623 * input:		argument pointer
1624 *
1625 * This function is the interrupt service routine entry point.  There are two
1626 * types of interrupts, state change interrupt and response interrupt.  If an
1627 * interrupt is not ours, we just return.
1628 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked (e.g. during teardown): ignore this one. */
	if (sc->mask_interrupts)
		return;

	/* Legacy INTx may be shared: clear and bail if it was not ours. */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1656
1657/*
1658 * mrsas_complete_cmd:	Process reply request
1659 * input:				Adapter instance soft state
1660 *
1661 * This function is called from mrsas_isr() to process reply request and clear
1662 * response interrupt. Processing of the reply request entails walking
1663 * through the reply descriptor array for the command request  pended from
1664 * Firmware.  We look at the Function field to determine the command type and
1665 * perform the appropriate action.  Before we return, we clear the response
1666 * interrupt.
1667 */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Start at this MSI-x vector's reply ring segment, at the last
	 * position processed. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Walk the reply ring until we hit the all-ones sentinel that marks
	 * a slot firmware has not written yet.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMIDs are 1-based; slot 0 of the list is SMID 1. */
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
			/* Wake the reset path waiting on this task-management
			 * completion. */
            wakeup_one((void *)&sc->ocr_chan);
            break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				/* Plain (non-mirrored) command: complete it now. */
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer  Raid  1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status by the failed status
				 * and make sure that if any command fails,
				 * driver returns fail status to CAM.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				/* Only the second of the pair to complete
				 * finishes the CCB. */
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner  = false;
					}
					mrsas_release_mpt_cmd(r1_cmd);
					/* Two decrements: one for the peer
					 * command, one for this command. */
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, le32toh(data_length), sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance (and wrap) this vector's consumer index. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* restore the all-ones
							 * "unused" sentinel */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1851
1852/*
1853 * mrsas_map_mpt_cmd_status:	Allocate DMAable memory.
1854 * input:						Adapter instance soft state
1855 *
1856 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1857 * It checks the command status and maps the appropriate CAM status for the
1858 * CCB.
1859 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
    u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* SCSI-level error: hand the sense data back to CAM. */
		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, sense, 18);
			ccb_ptr->csio.sense_len = 18;
			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/*
		 * NOTE(review): these cases OR into ccb_h.status rather than
		 * assigning it, so they depend on the caller's prior status
		 * value -- confirm that is intended.
		 */
		if (ccb_ptr->ccb_h.target_lun)
			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/* Stale map sequence: ask CAM to requeue the request. */
		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1899
1900/*
1901 * mrsas_alloc_mem:	Allocate DMAable memory
1902 * input:			Adapter instance soft state
1903 *
1904 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1905 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1906 * Kernel virtual address. Callback argument is physical memory address.
1907 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag.  Every other tag below derives from it,
	 * so its limits (maxphys, sc->max_num_sge segments) bound all DMA
	 * in this driver instance.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    maxphys,			/* maxsize */
	    sc->max_num_sge,		/* nsegments */
	    maxphys,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer (driver version string handed to FW
	 * at IOC INIT time; restricted to the low 4GB).
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, single contiguous
	 * segment; FW indexes into this region by SMID).
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (SGE overflow area, 4-byte aligned).
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array (one reply queue per MSI-X
	 * vector, or a single queue when MSI-X is disabled).
	 *
	 * NOTE(review): unlike the other buffers this one is not bzero'ed
	 * here — presumably initialized elsewhere before FW use; confirm.
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 * (MRSAS_SENSE_LEN bytes per FW command, 64-byte aligned).
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure (AEN payload buffer).
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure (DCMD response buffer).
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Uses busdma_lock_mutex/io_lock so
	 * deferred map callbacks are serialized against the driver.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2167
2168/*
2169 * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2170 * input:			callback argument, machine dependent type
2171 * 					that describes DMA segments, number of segments, error code
2172 *
2173 * This function is for the driver to receive mapping information resultant of
2174 * the bus_dmamap_load(). The information is actually not being used, but the
2175 * address is saved anyway.
2176 */
2177void
2178mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2179{
2180	bus_addr_t *addr;
2181
2182	addr = arg;
2183	*addr = segs[0].ds_addr;
2184}
2185
2186/*
2187 * mrsas_setup_raidmap:	Set up RAID map.
2188 * input:				Adapter instance soft state
2189 *
2190 * Allocate DMA memory for the RAID maps and perform setup.
2191 */
2192static int
2193mrsas_setup_raidmap(struct mrsas_softc *sc)
2194{
2195	int i;
2196
2197	for (i = 0; i < 2; i++) {
2198		sc->ld_drv_map[i] =
2199		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2200		/* Do Error handling */
2201		if (!sc->ld_drv_map[i]) {
2202			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2203
2204			if (i == 1)
2205				free(sc->ld_drv_map[0], M_MRSAS);
2206			/* ABORT driver initialization */
2207			goto ABORT;
2208		}
2209	}
2210
2211	for (int i = 0; i < 2; i++) {
2212		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2213		    4, 0,
2214		    BUS_SPACE_MAXADDR_32BIT,
2215		    BUS_SPACE_MAXADDR,
2216		    NULL, NULL,
2217		    sc->max_map_sz,
2218		    1,
2219		    sc->max_map_sz,
2220		    BUS_DMA_ALLOCNOW,
2221		    NULL, NULL,
2222		    &sc->raidmap_tag[i])) {
2223			device_printf(sc->mrsas_dev,
2224			    "Cannot allocate raid map tag.\n");
2225			return (ENOMEM);
2226		}
2227		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2228		    (void **)&sc->raidmap_mem[i],
2229		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2230			device_printf(sc->mrsas_dev,
2231			    "Cannot allocate raidmap memory.\n");
2232			return (ENOMEM);
2233		}
2234		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2235
2236		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2237		    sc->raidmap_mem[i], sc->max_map_sz,
2238		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2239		    BUS_DMA_NOWAIT)) {
2240			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2241			return (ENOMEM);
2242		}
2243		if (!sc->raidmap_mem[i]) {
2244			device_printf(sc->mrsas_dev,
2245			    "Cannot allocate memory for raid map.\n");
2246			return (ENOMEM);
2247		}
2248	}
2249
2250	if (!mrsas_get_map_info(sc))
2251		mrsas_sync_map_info(sc);
2252
2253	return (0);
2254
2255ABORT:
2256	return (1);
2257}
2258
/**
 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
 * @sc:				Adapter soft state
 *
 * Sets sc->use_seqnum_jbod_fp to 1 on success and to 0 on failure or when
 * the firmware does not support SeqNum JBOD fast path.
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/* One MR_PD_CFG_SEQ per physical device; the first is inline. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* Feature gated on FW capability bit; disable and bail if absent. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers persist across later calls; allocate only once. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/*
		 * NOTE(review): unreachable after the successful
		 * bus_dmamem_alloc() above; left as-is.
		 */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Sync PD sequence numbers with FW; enable fast path only if both
	 * passes succeed (megasas_sync_pd_seq_num returns 0 on success). */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	/* NOTE(review): printed even when the sync above failed. */
	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
2330
2331/*
2332 * mrsas_init_fw:	Initialize Firmware
2333 * input:			Adapter soft state
2334 *
2335 * Calls transition_to_ready() to make sure Firmware is in operational state and
2336 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2337 * issues internal commands to get the controller info after the IOC_INIT
2338 * command response is received by Firmware.  Note:  code relating to
2339 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2340 * is left here as placeholder.
2341 */
2342static int
2343mrsas_init_fw(struct mrsas_softc *sc)
2344{
2345
2346	int ret, loop, ocr = 0;
2347	u_int32_t max_sectors_1;
2348	u_int32_t max_sectors_2;
2349	u_int32_t tmp_sectors;
2350	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
2351	int msix_enable = 0;
2352	int fw_msix_count = 0;
2353	int i, j;
2354
2355	/* Make sure Firmware is ready */
2356	ret = mrsas_transition_to_ready(sc, ocr);
2357	if (ret != SUCCESS) {
2358		return (ret);
2359	}
2360	if (sc->is_ventura || sc->is_aero) {
2361		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
2362#if VD_EXT_DEBUG
2363		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
2364#endif
2365		sc->maxRaidMapSize = ((scratch_pad_3 >>
2366		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
2367		    MR_MAX_RAID_MAP_SIZE_MASK);
2368	}
2369	/* MSI-x index 0- reply post host index register */
2370	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
2371	/* Check if MSI-X is supported while in ready state */
2372	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;
2373
2374	if (msix_enable) {
2375		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2376		    outbound_scratch_pad_2));
2377
2378		/* Check max MSI-X vectors */
2379		if (sc->device_id == MRSAS_TBOLT) {
2380			sc->msix_vectors = (scratch_pad_2
2381			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
2382			fw_msix_count = sc->msix_vectors;
2383		} else {
2384			/* Invader/Fury supports 96 MSI-X vectors */
2385			sc->msix_vectors = ((scratch_pad_2
2386			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
2387			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
2388			fw_msix_count = sc->msix_vectors;
2389
2390			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
2391				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
2392				sc->msix_combined = true;
2393			/*
2394			 * Save 1-15 reply post index
2395			 * address to local memory Index 0
2396			 * is already saved from reg offset
2397			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
2398			 */
2399			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
2400			    loop++) {
2401				sc->msix_reg_offset[loop] =
2402				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
2403				    (loop * 0x10);
2404			}
2405		}
2406
2407		/* Don't bother allocating more MSI-X vectors than cpus */
2408		sc->msix_vectors = min(sc->msix_vectors,
2409		    mp_ncpus);
2410
2411		/* Allocate MSI-x vectors */
2412		if (mrsas_allocate_msix(sc) == SUCCESS)
2413			sc->msix_enable = 1;
2414		else
2415			sc->msix_enable = 0;
2416
2417		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
2418		    "Online CPU %d Current MSIX <%d>\n",
2419		    fw_msix_count, mp_ncpus, sc->msix_vectors);
2420	}
2421	/*
2422     * MSI-X host index 0 is common for all adapter.
2423     * It is used for all MPT based Adapters.
2424	 */
2425	if (sc->msix_combined) {
2426		sc->msix_reg_offset[0] =
2427		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
2428	}
2429	if (mrsas_init_adapter(sc) != SUCCESS) {
2430		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
2431		return (1);
2432	}
2433
2434	if (sc->is_ventura || sc->is_aero) {
2435		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2436		    outbound_scratch_pad_4));
2437		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
2438			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
2439
2440		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
2441	}
2442
2443	/* Allocate internal commands for pass-thru */
2444	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
2445		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
2446		return (1);
2447	}
2448	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
2449	if (!sc->ctrl_info) {
2450		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
2451		return (1);
2452	}
2453	/*
2454	 * Get the controller info from FW, so that the MAX VD support
2455	 * availability can be decided.
2456	 */
2457	if (mrsas_get_ctrl_info(sc)) {
2458		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
2459		return (1);
2460	}
2461	sc->secure_jbod_support =
2462	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;
2463
2464	if (sc->secure_jbod_support)
2465		device_printf(sc->mrsas_dev, "FW supports SED \n");
2466
2467	if (sc->use_seqnum_jbod_fp)
2468		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
2469
2470	if (sc->support_morethan256jbod)
2471		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");
2472
2473	if (mrsas_setup_raidmap(sc) != SUCCESS) {
2474		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
2475		    "There seems to be some problem in the controller\n"
2476		    "Please contact to the SUPPORT TEAM if the problem persists\n");
2477	}
2478	megasas_setup_jbod_map(sc);
2479
2480	memset(sc->target_list, 0,
2481		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
2482	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
2483		sc->target_list[i].target_id = 0xffff;
2484
2485	/* For pass-thru, get PD/LD list and controller info */
2486	memset(sc->pd_list, 0,
2487	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
2488	if (mrsas_get_pd_list(sc) != SUCCESS) {
2489		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
2490		return (1);
2491	}
2492	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
2493	if (mrsas_get_ld_list(sc) != SUCCESS) {
2494		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
2495		return (1);
2496	}
2497
2498	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
2499		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
2500						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
2501		if (!sc->streamDetectByLD) {
2502			device_printf(sc->mrsas_dev,
2503				"unable to allocate stream detection for pool of LDs\n");
2504			return (1);
2505		}
2506		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
2507			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
2508			if (!sc->streamDetectByLD[i]) {
2509				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
2510				for (j = 0; j < i; ++j)
2511					free(sc->streamDetectByLD[j], M_MRSAS);
2512				free(sc->streamDetectByLD, M_MRSAS);
2513				sc->streamDetectByLD = NULL;
2514				return (1);
2515			}
2516			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
2517			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
2518		}
2519	}
2520
2521	/*
2522	 * Compute the max allowed sectors per IO: The controller info has
2523	 * two limits on max sectors. Driver should use the minimum of these
2524	 * two.
2525	 *
2526	 * 1 << stripe_sz_ops.min = max sectors per strip
2527	 *
2528	 * Note that older firmwares ( < FW ver 30) didn't report information to
2529	 * calculate max_sectors_1. So the number ended up as zero always.
2530	 */
2531	tmp_sectors = 0;
2532	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
2533	    sc->ctrl_info->max_strips_per_io;
2534	max_sectors_2 = sc->ctrl_info->max_request_size;
2535	tmp_sectors = min(max_sectors_1, max_sectors_2);
2536	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
2537
2538	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
2539		sc->max_sectors_per_req = tmp_sectors;
2540
2541	sc->disableOnlineCtrlReset =
2542	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
2543	sc->UnevenSpanSupport =
2544	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
2545	if (sc->UnevenSpanSupport) {
2546		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
2547		    sc->UnevenSpanSupport);
2548
2549		if (MR_ValidateMapInfo(sc))
2550			sc->fast_path_io = 1;
2551		else
2552			sc->fast_path_io = 0;
2553	}
2554
2555	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
2556		sc->max_fw_cmds, sc->max_scsi_cmds);
2557	return (0);
2558}
2559
2560/*
2561 * mrsas_init_adapter:	Initializes the adapter/controller
2562 * input:				Adapter soft state
2563 *
2564 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2565 * ROC/controller.  The FW register is read to determined the number of
2566 * commands that is supported.  All memory allocations for IO is based on
2567 * max_cmd.  Appropriate calculations are performed in this function.
2568 */
2569int
2570mrsas_init_adapter(struct mrsas_softc *sc)
2571{
2572	uint32_t status;
2573	u_int32_t scratch_pad_2;
2574	int ret;
2575	int i = 0;
2576
2577	/* Read FW status register */
2578	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2579
2580	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2581
2582	/* Decrement the max supported by 1, to correlate with FW */
2583	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2584	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;
2585
2586	/* Determine allocation size of command frames */
2587	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
2588	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
2589	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2590	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
2591	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
2592	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
2593	    outbound_scratch_pad_2));
2594
2595	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
2596	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
2597	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
2598	    sc->reply_q_depth, sc->request_alloc_sz,
2599	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);
2600
2601	/*
2602	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
2603	 * Firmware support extended IO chain frame which is 4 time more
2604	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
2605	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
2606	 */
2607	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
2608		sc->max_chain_frame_sz =
2609		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2610		    * MEGASAS_1MB_IO;
2611	else
2612		sc->max_chain_frame_sz =
2613		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
2614		    * MEGASAS_256K_IO;
2615
2616	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
2617	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2618	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2619
2620	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
2621	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2622
2623	mrsas_dprint(sc, MRSAS_INFO,
2624	    "max sge: 0x%x, max chain frame size: 0x%x, "
2625	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
2626	    sc->max_num_sge,
2627	    sc->max_chain_frame_sz, sc->max_fw_cmds,
2628	    sc->chain_frames_alloc_sz);
2629
2630	/* Used for pass thru MFI frame (DCMD) */
2631	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2632
2633	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2634	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2635
2636	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2637
2638	for (i = 0; i < count; i++)
2639		sc->last_reply_idx[i] = 0;
2640
2641	ret = mrsas_alloc_mem(sc);
2642	if (ret != SUCCESS)
2643		return (ret);
2644
2645	ret = mrsas_alloc_mpt_cmds(sc);
2646	if (ret != SUCCESS)
2647		return (ret);
2648
2649	ret = mrsas_ioc_init(sc);
2650	if (ret != SUCCESS)
2651		return (ret);
2652
2653	return (0);
2654}
2655
2656/*
2657 * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2658 * input:				Adapter soft state
2659 *
2660 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2661 */
2662int
2663mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2664{
2665	int ioc_init_size;
2666
2667	/* Allocate IOC INIT command */
2668	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2669	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2670	    1, 0,
2671	    BUS_SPACE_MAXADDR_32BIT,
2672	    BUS_SPACE_MAXADDR,
2673	    NULL, NULL,
2674	    ioc_init_size,
2675	    1,
2676	    ioc_init_size,
2677	    BUS_DMA_ALLOCNOW,
2678	    NULL, NULL,
2679	    &sc->ioc_init_tag)) {
2680		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2681		return (ENOMEM);
2682	}
2683	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2684	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2685		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2686		return (ENOMEM);
2687	}
2688	bzero(sc->ioc_init_mem, ioc_init_size);
2689	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2690	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2691	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2692		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2693		return (ENOMEM);
2694	}
2695	return (0);
2696}
2697
/*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
 * input:				Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
2704void
2705mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2706{
2707	if (sc->ioc_init_phys_mem)
2708		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2709	if (sc->ioc_init_mem != NULL)
2710		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2711	if (sc->ioc_init_tag != NULL)
2712		bus_dma_tag_destroy(sc->ioc_init_tag);
2713}
2714
2715/*
2716 * mrsas_ioc_init:	Sends IOC Init command to FW
2717 * input:			Adapter soft state
2718 *
2719 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2720 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/* Probe FW SYNC CACHE support unless administratively blocked. */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* The MPI2 IOC INIT message lives 1K past the MFI init frame;
	 * multi-byte fields are little-endian on the wire (htole). */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* MFI wrapper frame at the start of the buffer carries the IOC
	 * INIT message as its payload; status polled below. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	/* Advertise the driver version string to FW via the verbuf DMA. */
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	/* Interrupts stay off: completion is detected by polling
	 * cmd_status, which FW overwrites when the command finishes. */
	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* 0 = success; 0xFF still = timeout; anything else = FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers may additionally support atomic descriptors. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	/* The IOC INIT buffer is only needed for this one handshake. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2829
2830/*
2831 * mrsas_alloc_mpt_cmds:	Allocates the command packets
2832 * input:					Adapter instance soft state
2833 *
2834 * This function allocates the internal commands for IOs. Each command that is
2835 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2836 * array is allocated with mrsas_mpt_cmd context.  The free commands are
2837 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2838 * max_fw_cmds.
2839 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_fw_cmds, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_fw_cmds = sc->max_fw_cmds;

	/* Request descriptor array, one slot per FW command. */
	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
	    M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
	for (i = 0; i < max_fw_cmds; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind the partially populated array on failure. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve the pre-allocated DMA regions into per-command slices.  The
	 * first IO frame is skipped because SMIDs are 1-based (cmd->index =
	 * i + 1 below), leaving frame 0 unused by regular commands.
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_fw_cmds; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			/*
			 * NOTE(review): earlier allocations and DMA maps are
			 * not released on this path — presumably freed by the
			 * caller's attach-failure teardown; confirm.
			 */
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}
2923
2924/*
 * mrsas_write_64bit_req_desc:	Writes 64 bit request descriptor to FW
2926 * input:			Adapter softstate
2927 * 				request descriptor address low
2928 * 				request descriptor address high
2929 */
void
mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The low/high queue-port writes are issued as a pair under
	 * pci_lock so they cannot interleave with another descriptor's
	 * writes from a different CPU.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    le32toh(req_desc_lo));
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    le32toh(req_desc_hi));
	mtx_unlock(&sc->pci_lock);
}
2941
2942/*
2943 * mrsas_fire_cmd:	Sends command to FW
2944 * input:		Adapter softstate
2945 * 			request descriptor address low
2946 * 			request descriptor address high
2947 *
2948 * This functions fires the command to Firmware by writing to the
2949 * inbound_low_queue_port and inbound_high_queue_port.
2950 */
2951void
2952mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2953    u_int32_t req_desc_hi)
2954{
2955	if (sc->atomic_desc_support)
2956		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2957		    le32toh(req_desc_lo));
2958	else
2959		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2960}
2961
2962/*
2963 * mrsas_transition_to_ready:  Move FW to Ready state input:
2964 * Adapter instance soft state
2965 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to ready state. Otherwise, it has to
2969 * wait for the ready state.
2970 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* The FW state machine value lives in the outbound scratch pad. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full register snapshot; used below to detect any change. */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				/* During OCR, a fault is expected; wait it out. */
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll doorbell bit 0 (busy) until FW acks the reset. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 * (poll in 1 ms steps for any change in the raw register).
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3076
3077/*
3078 * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3079 * input:				Adapter soft state
3080 *
3081 * This function removes an MFI command from the command list.
3082 */
3083struct mrsas_mfi_cmd *
3084mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3085{
3086	struct mrsas_mfi_cmd *cmd = NULL;
3087
3088	mtx_lock(&sc->mfi_cmd_pool_lock);
3089	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3090		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3091		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3092	}
3093	mtx_unlock(&sc->mfi_cmd_pool_lock);
3094
3095	return cmd;
3096}
3097
3098/*
3099 * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3100 * input:				Adapter Context.
3101 *
 * This function periodically checks the FW status register and the
 * do_timedout_reset flag.  It performs an OCR or kills the adapter if the
 * FW is in the fault state or an IO timeout has triggered a reset.
3105 */
3106static void
3107mrsas_ocr_thread(void *arg)
3108{
3109	struct mrsas_softc *sc;
3110	u_int32_t fw_status, fw_state;
3111	u_int8_t tm_target_reset_failed = 0;
3112
3113	sc = (struct mrsas_softc *)arg;
3114
3115	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
3116	sc->ocr_thread_active = 1;
3117	mtx_lock(&sc->sim_lock);
3118	for (;;) {
3119		/* Sleep for 1 second and check the queue status */
3120		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3121		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
3122		if (sc->remove_in_progress ||
3123		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3124			mrsas_dprint(sc, MRSAS_OCR,
3125			    "Exit due to %s from %s\n",
3126			    sc->remove_in_progress ? "Shutdown" :
3127			    "Hardware critical error", __func__);
3128			break;
3129		}
3130		fw_status = mrsas_read_reg_with_retries(sc,
3131		    offsetof(mrsas_reg_set, outbound_scratch_pad));
3132		fw_state = fw_status & MFI_STATE_MASK;
3133		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
3134			mrsas_atomic_read(&sc->target_reset_outstanding)) {
3135			/* First, freeze further IOs to come to the SIM */
3136			mrsas_xpt_freeze(sc);
3137
3138			/* If this is an IO timeout then go for target reset */
3139			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
3140				device_printf(sc->mrsas_dev, "Initiating Target RESET "
3141				    "because of SCSI IO timeout!\n");
3142
3143				/* Let the remaining IOs to complete */
3144				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
3145				      "mrsas_reset_targets", 5 * hz);
3146
3147				/* Try to reset the target device */
3148				if (mrsas_reset_targets(sc) == FAIL)
3149					tm_target_reset_failed = 1;
3150			}
3151
3152			/* If this is a DCMD timeout or FW fault,
3153			 * then go for controller reset
3154			 */
3155			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
3156			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
3157				if (tm_target_reset_failed)
3158					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
3159					    "TM FAILURE!\n");
3160				else
3161					device_printf(sc->mrsas_dev, "Initiaiting OCR "
3162						"because of %s!\n", sc->do_timedout_reset ?
3163						"DCMD IO Timeout" : "FW fault");
3164
3165				mtx_lock_spin(&sc->ioctl_lock);
3166				sc->reset_in_progress = 1;
3167				mtx_unlock_spin(&sc->ioctl_lock);
3168				sc->reset_count++;
3169
3170				/*
3171				 * Wait for the AEN task to be completed if it is running.
3172				 */
3173				mtx_unlock(&sc->sim_lock);
3174				taskqueue_drain(sc->ev_tq, &sc->ev_task);
3175				mtx_lock(&sc->sim_lock);
3176
3177				taskqueue_block(sc->ev_tq);
3178				/* Try to reset the controller */
3179				mrsas_reset_ctrl(sc, sc->do_timedout_reset);
3180
3181				sc->do_timedout_reset = 0;
3182				sc->reset_in_progress = 0;
3183				tm_target_reset_failed = 0;
3184				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
3185				memset(sc->target_reset_pool, 0,
3186				    sizeof(sc->target_reset_pool));
3187				taskqueue_unblock(sc->ev_tq);
3188			}
3189
3190			/* Now allow IOs to come to the SIM */
3191			 mrsas_xpt_release(sc);
3192		}
3193	}
3194	mtx_unlock(&sc->sim_lock);
3195	sc->ocr_thread_active = 0;
3196	mrsas_kproc_exit(0);
3197}
3198
3199/*
3200 * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3201 * input:					Adapter Context.
3202 *
 * This function clears the reply descriptors so that, after OCR, neither
 * the driver nor the FW sees stale completion history.
3205 */
3206void
3207mrsas_reset_reply_desc(struct mrsas_softc *sc)
3208{
3209	int i, count;
3210	pMpi2ReplyDescriptorsUnion_t reply_desc;
3211
3212	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3213	for (i = 0; i < count; i++)
3214		sc->last_reply_idx[i] = 0;
3215
3216	reply_desc = sc->reply_desc_mem;
3217	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3218		reply_desc->Words = MRSAS_ULONG_MAX;
3219	}
3220}
3221
3222/*
3223 * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3224 * input:				Adapter Context.
3225 *
3226 * This function will run from thread context so that it can sleep. 1. Do not
3227 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3228 * to complete for 180 seconds. 3. If #2 does not find any outstanding
3229 * command Controller is in working state, so skip OCR. Otherwise, do
3230 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3231 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3232 * OCR, Re-fire Management command and move Controller to Operation state.
3233 */
3234int
3235mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3236{
3237	int retval = SUCCESS, i, j, retry = 0;
3238	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3239	union ccb *ccb;
3240	struct mrsas_mfi_cmd *mfi_cmd;
3241	struct mrsas_mpt_cmd *mpt_cmd;
3242	union mrsas_evt_class_locale class_locale;
3243	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3244
3245	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3246		device_printf(sc->mrsas_dev,
3247		    "mrsas: Hardware critical error, returning FAIL.\n");
3248		return FAIL;
3249	}
3250	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3251	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3252	mrsas_disable_intr(sc);
3253	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3254	    sc->mrsas_fw_fault_check_delay * hz);
3255
3256	/* First try waiting for commands to complete */
3257	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3258		mrsas_dprint(sc, MRSAS_OCR,
3259		    "resetting adapter from %s.\n",
3260		    __func__);
3261		/* Now return commands back to the CAM layer */
3262		mtx_unlock(&sc->sim_lock);
3263		for (i = 0; i < sc->max_fw_cmds; i++) {
3264			mpt_cmd = sc->mpt_cmd_list[i];
3265
3266			if (mpt_cmd->peer_cmd) {
3267				mrsas_dprint(sc, MRSAS_OCR,
3268				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3269				    i, mpt_cmd, mpt_cmd->peer_cmd);
3270			}
3271
3272			if (mpt_cmd->ccb_ptr) {
3273				if (mpt_cmd->callout_owner) {
3274					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3275					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3276					mrsas_cmd_done(sc, mpt_cmd);
3277				} else {
3278					mpt_cmd->ccb_ptr = NULL;
3279					mrsas_release_mpt_cmd(mpt_cmd);
3280				}
3281			}
3282		}
3283
3284		mrsas_atomic_set(&sc->fw_outstanding, 0);
3285
3286		mtx_lock(&sc->sim_lock);
3287
3288		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3289		    outbound_scratch_pad));
3290		abs_state = status_reg & MFI_STATE_MASK;
3291		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3292		if (sc->disableOnlineCtrlReset ||
3293		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3294			/* Reset not supported, kill adapter */
3295			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3296			mrsas_kill_hba(sc);
3297			retval = FAIL;
3298			goto out;
3299		}
3300		/* Now try to reset the chip */
3301		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3302			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3303			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3304			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3305			    MPI2_WRSEQ_1ST_KEY_VALUE);
3306			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3307			    MPI2_WRSEQ_2ND_KEY_VALUE);
3308			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3309			    MPI2_WRSEQ_3RD_KEY_VALUE);
3310			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3311			    MPI2_WRSEQ_4TH_KEY_VALUE);
3312			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3313			    MPI2_WRSEQ_5TH_KEY_VALUE);
3314			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3315			    MPI2_WRSEQ_6TH_KEY_VALUE);
3316
3317			/* Check that the diag write enable (DRWE) bit is on */
3318			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3319			    fusion_host_diag));
3320			retry = 0;
3321			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3322				DELAY(100 * 1000);
3323				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3324				    fusion_host_diag));
3325				if (retry++ == 100) {
3326					mrsas_dprint(sc, MRSAS_OCR,
3327					    "Host diag unlock failed!\n");
3328					break;
3329				}
3330			}
3331			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3332				continue;
3333
3334			/* Send chip reset command */
3335			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3336			    host_diag | HOST_DIAG_RESET_ADAPTER);
3337			DELAY(3000 * 1000);
3338
3339			/* Make sure reset adapter bit is cleared */
3340			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3341			    fusion_host_diag));
3342			retry = 0;
3343			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3344				DELAY(100 * 1000);
3345				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3346				    fusion_host_diag));
3347				if (retry++ == 1000) {
3348					mrsas_dprint(sc, MRSAS_OCR,
3349					    "Diag reset adapter never cleared!\n");
3350					break;
3351				}
3352			}
3353			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3354				continue;
3355
3356			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3357			    outbound_scratch_pad)) & MFI_STATE_MASK;
3358			retry = 0;
3359
3360			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3361				DELAY(100 * 1000);
3362				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3363				    outbound_scratch_pad)) & MFI_STATE_MASK;
3364			}
3365			if (abs_state <= MFI_STATE_FW_INIT) {
3366				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3367				    " state = 0x%x\n", abs_state);
3368				continue;
3369			}
3370			/* Wait for FW to become ready */
3371			if (mrsas_transition_to_ready(sc, 1)) {
3372				mrsas_dprint(sc, MRSAS_OCR,
3373				    "mrsas: Failed to transition controller to ready.\n");
3374				continue;
3375			}
3376			mrsas_reset_reply_desc(sc);
3377			if (mrsas_ioc_init(sc)) {
3378				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3379				continue;
3380			}
3381			for (j = 0; j < sc->max_fw_cmds; j++) {
3382				mpt_cmd = sc->mpt_cmd_list[j];
3383				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3384					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3385					/* If not an IOCTL then release the command else re-fire */
3386					if (!mfi_cmd->sync_cmd) {
3387						mrsas_release_mfi_cmd(mfi_cmd);
3388					} else {
3389						req_desc = mrsas_get_request_desc(sc,
3390						    mfi_cmd->cmd_id.context.smid - 1);
3391						mrsas_dprint(sc, MRSAS_OCR,
3392						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3393						    mfi_cmd->frame->dcmd.opcode, j);
3394						if (!req_desc)
3395							device_printf(sc->mrsas_dev,
3396							    "Cannot build MPT cmd.\n");
3397						else
3398							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3399							    req_desc->addr.u.high);
3400					}
3401				}
3402			}
3403
3404			/* Reset load balance info */
3405			memset(sc->load_balance_info, 0,
3406			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3407
3408			if (mrsas_get_ctrl_info(sc)) {
3409				mrsas_kill_hba(sc);
3410				retval = FAIL;
3411				goto out;
3412			}
3413			if (!mrsas_get_map_info(sc))
3414				mrsas_sync_map_info(sc);
3415
3416			megasas_setup_jbod_map(sc);
3417
3418			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3419				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3420					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3421					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3422				}
3423			}
3424
3425			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3426			mrsas_enable_intr(sc);
3427			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3428
3429			/* Register AEN with FW for last sequence number */
3430			class_locale.members.reserved = 0;
3431			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3432			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3433
3434			mtx_unlock(&sc->sim_lock);
3435			if (mrsas_register_aen(sc, sc->last_seq_num,
3436			    class_locale.word)) {
3437				device_printf(sc->mrsas_dev,
3438				    "ERROR: AEN registration FAILED from OCR !!! "
3439				    "Further events from the controller cannot be notified."
3440				    "Either there is some problem in the controller"
3441				    "or the controller does not support AEN.\n"
3442				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3443			}
3444			mtx_lock(&sc->sim_lock);
3445
3446			/* Adapter reset completed successfully */
3447			device_printf(sc->mrsas_dev, "Reset successful\n");
3448			retval = SUCCESS;
3449			goto out;
3450		}
3451		/* Reset failed, kill the adapter */
3452		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3453		mrsas_kill_hba(sc);
3454		retval = FAIL;
3455	} else {
3456		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3457		mrsas_enable_intr(sc);
3458		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3459	}
3460out:
3461	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3462	mrsas_dprint(sc, MRSAS_OCR,
3463	    "Reset Exit with %d.\n", retval);
3464	return retval;
3465}
3466
3467/*
3468 * mrsas_kill_hba:	Kill HBA when OCR is not supported
3469 * input:			Adapter Context.
3470 *
3471 * This function will kill HBA when OCR is not supported.
3472 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so other paths stop issuing work. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/*
	 * NOTE(review): 1 second pause before MFI_STOP_ADP — presumably to
	 * let in-flight accesses settle; confirm the rationale.
	 */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
3485
3486/**
3487 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3488 * input:			Controller softc
3489 *
3490 * Returns void
3491 */
3492void
3493mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3494{
3495	int i;
3496	struct mrsas_mpt_cmd *cmd_mpt;
3497	struct mrsas_mfi_cmd *cmd_mfi;
3498	u_int32_t count, MSIxIndex;
3499
3500	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3501	for (i = 0; i < sc->max_fw_cmds; i++) {
3502		cmd_mpt = sc->mpt_cmd_list[i];
3503
3504		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3505			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3506			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3507				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3508					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3509					    cmd_mpt->io_request->RaidContext.raid_context.status);
3510			}
3511		}
3512	}
3513}
3514
3515/*
3516 * mrsas_wait_for_outstanding:	Wait for outstanding commands
3517 * input:						Adapter Context.
3518 *
3519 * This function will wait for 180 seconds for outstanding commands to be
3520 * completed.
3521 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second, up to MRSAS_RESET_WAIT_TIME iterations. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drop sim_lock while draining each MSI-x reply queue. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout always forces a reset; no point waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically report progress and drain completion queues. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Still busy after the full wait: caller must reset the adapter. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3580
3581/*
3582 * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3583 * input:					Command packet for return to free cmd pool
3584 *
3585 * This function returns the MFI & MPT command to the command list.
3586 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the MPT command attached to this MFI command, if one was
	 * allocated: a non-zero SMID means an MPT frame is in use.  SMIDs
	 * are 1-based, hence the -1 when indexing mpt_cmd_list.
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from mfi cmd frame's smid value */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3615
3616/*
3617 * mrsas_get_controller_info:	Returns FW's controller structure
3618 * input:						Adapter soft state
3619 * 								Controller information structure
3620 *
3621 * Issues an internal command (DCMD) to get the FW's controller structure. This
3622 * information is mainly used to find out the maximum IO transfer per command
3623 * supported by the FW.
3624 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;	/* assume failure; cleared once the DCMD succeeds */
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA-able buffer the FW writes the controller info into. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the MR_DCMD_CTRL_GET_INFO frame (single read SGE). */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));

	/* Polled issue when interrupts are masked (e.g. during OCR). */
	if (!sc->mask_interrupts)
		retcode = mrsas_issue_blocked_cmd(sc, cmd);
	else
		retcode = mrsas_issue_polled(sc, cmd);

	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;
	else {
		/* Copy out and byte-swap the fields the driver consumes. */
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
	}

	do_ocr = 0;
	mrsas_update_ext_vd_details(sc);

	sc->use_seqnum_jbod_fp =
	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
	sc->support_morethan256jbod =
		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
	mrsas_free_ctlr_info_cmd(sc);

	/* A timed-out DCMD schedules an OCR via the OCR thread. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	/* On timeout the cmd stays owned by the completion path. */
	if (!sc->mask_interrupts)
		mrsas_release_mfi_cmd(cmd);

	return (retcode);
}
3696
3697/*
3698 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3699 * input:
3700 *	sc - Controller's softc
3701*/
3702static void
3703mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3704{
3705	u_int32_t ventura_map_sz = 0;
3706	sc->max256vdSupport =
3707		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3708
3709	/* Below is additional check to address future FW enhancement */
3710	if (sc->ctrl_info->max_lds > 64)
3711		sc->max256vdSupport = 1;
3712
3713	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3714	    * MRSAS_MAX_DEV_PER_CHANNEL;
3715	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3716	    * MRSAS_MAX_DEV_PER_CHANNEL;
3717	if (sc->max256vdSupport) {
3718		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3719		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3720	} else {
3721		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3722		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3723	}
3724
3725	if (sc->maxRaidMapSize) {
3726		ventura_map_sz = sc->maxRaidMapSize *
3727		    MR_MIN_MAP_SIZE;
3728		sc->current_map_sz = ventura_map_sz;
3729		sc->max_map_sz = ventura_map_sz;
3730	} else {
3731		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3732		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3733		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3734		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3735		if (sc->max256vdSupport)
3736			sc->current_map_sz = sc->new_map_sz;
3737		else
3738			sc->current_map_sz = sc->old_map_sz;
3739	}
3740
3741	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3742#if VD_EXT_DEBUG
3743	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3744	    sc->maxRaidMapSize);
3745	device_printf(sc->mrsas_dev,
3746	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3747	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3748	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3749	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3750	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3751#endif
3752}
3753
3754/*
3755 * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3756 * input:						Adapter soft state
3757 *
3758 * Allocates DMAable memory for the controller info internal command.
3759 */
3760int
3761mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3762{
3763	int ctlr_info_size;
3764
3765	/* Allocate get controller info command */
3766	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3767	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3768	    1, 0,
3769	    BUS_SPACE_MAXADDR_32BIT,
3770	    BUS_SPACE_MAXADDR,
3771	    NULL, NULL,
3772	    ctlr_info_size,
3773	    1,
3774	    ctlr_info_size,
3775	    BUS_DMA_ALLOCNOW,
3776	    NULL, NULL,
3777	    &sc->ctlr_info_tag)) {
3778		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3779		return (ENOMEM);
3780	}
3781	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3782	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3783		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3784		return (ENOMEM);
3785	}
3786	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3787	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3788	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3789		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3790		return (ENOMEM);
3791	}
3792	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3793	return (0);
3794}
3795
3796/*
3797 * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3798 * input:						Adapter soft state
3799 *
3800 * Deallocates memory of the get controller info cmd.
3801 */
3802void
3803mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
3804{
3805	if (sc->ctlr_info_phys_addr)
3806		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
3807	if (sc->ctlr_info_mem != NULL)
3808		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
3809	if (sc->ctlr_info_tag != NULL)
3810		bus_dma_tag_destroy(sc->ctlr_info_tag);
3811}
3812
3813/*
3814 * mrsas_issue_polled:	Issues a polling command
3815 * inputs:				Adapter soft state
3816 * 						Command packet to be issued
3817 *
3818 * This function is for posting of internal commands to Firmware.  MFI requires
3819 * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3820 * the poll response timer is 180 seconds.
3821 */
3822int
3823mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3824{
3825	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3826	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3827	int i, retcode = SUCCESS;
3828
3829	frame_hdr->cmd_status = 0xFF;
3830	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3831
3832	/* Issue the frame using inbound queue port */
3833	if (mrsas_issue_dcmd(sc, cmd)) {
3834		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3835		return (1);
3836	}
3837	/*
3838	 * Poll response timer to wait for Firmware response.  While this
3839	 * timer with the DELAY call could block CPU, the time interval for
3840	 * this is only 1 millisecond.
3841	 */
3842	if (frame_hdr->cmd_status == 0xFF) {
3843		for (i = 0; i < (max_wait * 1000); i++) {
3844			if (frame_hdr->cmd_status == 0xFF)
3845				DELAY(1000);
3846			else
3847				break;
3848		}
3849	}
3850	if (frame_hdr->cmd_status == 0xFF) {
3851		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3852		    "seconds from %s\n", max_wait, __func__);
3853		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3854		    cmd->frame->dcmd.opcode);
3855		retcode = ETIMEDOUT;
3856	}
3857	return (retcode);
3858}
3859
3860/*
3861 * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3862 * input:				Adapter soft state mfi cmd pointer
3863 *
3864 * This function is called by mrsas_issued_blocked_cmd() and
3865 * mrsas_issued_polled(), to build the MPT command and then fire the command
3866 * to Firmware.
3867 */
3868int
3869mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3870{
3871	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3872
3873	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3874	if (!req_desc) {
3875		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3876		return (1);
3877	}
3878	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3879
3880	return (0);
3881}
3882
3883/*
3884 * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3885 * input:				Adapter soft state mfi cmd to build
3886 *
3887 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3888 * command and prepares the MPT command to send to Firmware.
3889 */
3890MRSAS_REQUEST_DESCRIPTOR_UNION *
3891mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3892{
3893	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3894	u_int16_t index;
3895
3896	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3897		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3898		return NULL;
3899	}
3900	index = cmd->cmd_id.context.smid;
3901
3902	req_desc = mrsas_get_request_desc(sc, index - 1);
3903	if (!req_desc)
3904		return NULL;
3905
3906	req_desc->addr.Words = 0;
3907	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3908
3909	req_desc->SCSIIO.SMID = htole16(index);
3910
3911	return (req_desc);
3912}
3913
3914/*
3915 * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3916 * input:						Adapter soft state mfi cmd pointer
3917 *
3918 * The MPT command and the io_request are setup as a passthru command. The SGE
3919 * chain address is set to frame_phys_addr of the MFI command.
3920 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* Pair the MFI cmd with a fresh MPT cmd; fail if the pool is empty. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the completion path can find the MFI cmd again. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		/*
		 * On gen3/Ventura/Aero controllers, clear the flags of the
		 * last SGE in the main message so FW does not misinterpret
		 * stale data there.
		 */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* Mark the request as an MFI passthru; SGLOffset0 is in 32-bit words. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain element points at the DMA address of the MFI frame. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3970
3971/*
3972 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3973 * input:					Adapter soft state Command to be issued
3974 *
3975 * This function waits on an event for the command to be returned from the ISR.
3976 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3977 * internal and ioctl commands.
3978 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): sc->chan is assigned the address of the local 'cmd'
	 * pointer, but the actual sleep/wakeup channel is &sc->chan itself
	 * (see mrsas_wakeup()); the stored value is never dereferenced.
	 */
	sc->chan = (void *)&cmd;

	/*
	 * Sleep in 1-second slices (hz ticks) until the completion path
	 * overwrites cmd_status, or until max_wait seconds have elapsed.
	 */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		/* IOCTL (sync) commands are allowed to wait indefinitely. */
		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}

	/* Sentinel still in place: firmware never completed the DCMD. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
4023
4024/*
4025 * mrsas_complete_mptmfi_passthru:	Completes a command
4026 * input:	@sc:					Adapter soft state
4027 * 			@cmd:					Command to be completed
4028 * 			@status:				cmd completion status
4029 *
4030 * This function is called from mrsas_complete_cmd() after an interrupt is
4031 * received from Firmware, and io_request->Function is
4032 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4033 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	/* Internal commands carry no CAM CCB; drop any stale reference. */
	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI IO completes like a DCMD. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/*
		 * Check for LD map update.
		 * NOTE(review): opcode here is compared without le32toh(),
		 * while the MR_DCMD_CTRL_EVENT_WAIT check below uses it;
		 * verify intent on big-endian hosts.
		 */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* No newer map exists; nothing to validate. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Re-validate the new map and re-arm map-sync with FW. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
4131
4132/*
4133 * mrsas_wakeup:	Completes an internal command
4134 * input:			Adapter soft state
4135 * 					Command to be completed
4136 *
4137 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4138 * timer is started.  This function is called from
4139 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4140 * from the command wait.
4141 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Copy the FW completion status where the sleeping issuer reads it. */
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF means "still pending"; normalize so the waiter's loop exits. */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): the wakeup channel is &sc->chan itself; the value
	 * stored in sc->chan (address of the local 'cmd' parameter) is never
	 * dereferenced by the waiter in mrsas_issue_blocked_cmd().
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
4154
4155/*
4156 * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4157 * Adapter soft state Shutdown/Hibernate
4158 *
4159 * This function issues a DCMD internal command to Firmware to initiate shutdown
4160 * of the controller.
4161 */
4162static void
4163mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4164{
4165	struct mrsas_mfi_cmd *cmd;
4166	struct mrsas_dcmd_frame *dcmd;
4167
4168	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4169		return;
4170
4171	cmd = mrsas_get_mfi_cmd(sc);
4172	if (!cmd) {
4173		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
4174		return;
4175	}
4176	if (sc->aen_cmd)
4177		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4178	if (sc->map_update_cmd)
4179		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4180	if (sc->jbod_seq_cmd)
4181		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4182
4183	dcmd = &cmd->frame->dcmd;
4184	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4185
4186	dcmd->cmd = MFI_CMD_DCMD;
4187	dcmd->cmd_status = 0x0;
4188	dcmd->sge_count = 0;
4189	dcmd->flags = MFI_FRAME_DIR_NONE;
4190	dcmd->timeout = 0;
4191	dcmd->pad_0 = 0;
4192	dcmd->data_xfer_len = 0;
4193	dcmd->opcode = opcode;
4194
4195	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4196
4197	mrsas_issue_blocked_cmd(sc, cmd);
4198	mrsas_release_mfi_cmd(cmd);
4199
4200	return;
4201}
4202
4203/*
4204 * mrsas_flush_cache:         Requests FW to flush all its caches input:
4205 * Adapter soft state
4206 *
 * This function issues a DCMD internal command to Firmware to initiate
 * flushing of all caches.
4209 */
4210static void
4211mrsas_flush_cache(struct mrsas_softc *sc)
4212{
4213	struct mrsas_mfi_cmd *cmd;
4214	struct mrsas_dcmd_frame *dcmd;
4215
4216	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4217		return;
4218
4219	cmd = mrsas_get_mfi_cmd(sc);
4220	if (!cmd) {
4221		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4222		return;
4223	}
4224	dcmd = &cmd->frame->dcmd;
4225	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4226
4227	dcmd->cmd = MFI_CMD_DCMD;
4228	dcmd->cmd_status = 0x0;
4229	dcmd->sge_count = 0;
4230	dcmd->flags = MFI_FRAME_DIR_NONE;
4231	dcmd->timeout = 0;
4232	dcmd->pad_0 = 0;
4233	dcmd->data_xfer_len = 0;
4234	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4235	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4236
4237	mrsas_issue_blocked_cmd(sc, cmd);
4238	mrsas_release_mfi_cmd(cmd);
4239
4240	return;
4241}
4242
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Map holds one header plus one MR_PD_CFG_SEQ per physical device. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered: alternate jbodmap buffer by map id parity. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/*
		 * Pend mode: register the DCMD with FW and return; it
		 * completes later via mrsas_complete_mptmfi_passthru(),
		 * which releases the cmd.
		 * NOTE(review): on mrsas_issue_dcmd() failure the cmd is not
		 * released here — confirm whether that leak is intentional.
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	/* Non-pend mode: read the current PD sequence map synchronously. */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* NOTE(review): the printf passes pd_sync->count without le32toh(). */
	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* Timeout path keeps do_ocr set and schedules a controller reset. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4319
4320/*
4321 * mrsas_get_map_info:        Load and validate RAID map input:
4322 * Adapter instance soft state
4323 *
4324 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4325 * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4326 */
4327static int
4328mrsas_get_map_info(struct mrsas_softc *sc)
4329{
4330	uint8_t retcode = 0;
4331
4332	sc->fast_path_io = 0;
4333	if (!mrsas_get_ld_map_info(sc)) {
4334		retcode = MR_ValidateMapInfo(sc);
4335		if (retcode == 0) {
4336			sc->fast_path_io = 1;
4337			return 0;
4338		}
4339	}
4340	return 1;
4341}
4342
4343/*
 * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
 * Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's logical drive (LD) RAID
 * map structure.
4349 */
4350static int
4351mrsas_get_ld_map_info(struct mrsas_softc *sc)
4352{
4353	int retcode = 0;
4354	struct mrsas_mfi_cmd *cmd;
4355	struct mrsas_dcmd_frame *dcmd;
4356	void *map;
4357	bus_addr_t map_phys_addr = 0;
4358
4359	cmd = mrsas_get_mfi_cmd(sc);
4360	if (!cmd) {
4361		device_printf(sc->mrsas_dev,
4362		    "Cannot alloc for ld map info cmd.\n");
4363		return 1;
4364	}
4365	dcmd = &cmd->frame->dcmd;
4366
4367	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4368	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4369	if (!map) {
4370		device_printf(sc->mrsas_dev,
4371		    "Failed to alloc mem for ld map info.\n");
4372		mrsas_release_mfi_cmd(cmd);
4373		return (ENOMEM);
4374	}
4375	memset(map, 0, sizeof(sc->max_map_sz));
4376	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4377
4378	dcmd->cmd = MFI_CMD_DCMD;
4379	dcmd->cmd_status = 0xFF;
4380	dcmd->sge_count = 1;
4381	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4382	dcmd->timeout = 0;
4383	dcmd->pad_0 = 0;
4384	dcmd->data_xfer_len = htole32(sc->current_map_sz);
4385	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4386	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4387	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4388
4389	retcode = mrsas_issue_polled(sc, cmd);
4390	if (retcode == ETIMEDOUT)
4391		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4392
4393	return (retcode);
4394}
4395
4396/*
 * mrsas_sync_map_info:        Sync LD target/sequence info with FW input:
 * Adapter instance soft state
 *
 * Issues a pend-style internal command (DCMD) that registers the driver's
 * per-LD target-id/sequence-number view with the FW; FW completes it when
 * the RAID map changes.
4402 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t size_sync_info, num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Current driver-side map; assumed already in CPU byte order. */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/*
	 * Build the sync payload in the *other* (inactive) raidmap buffer,
	 * selected by (map_id - 1) parity.
	 */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One (targetId, seqNum) record per logical drive. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	/*
	 * Pend-style DCMD (mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG): FW holds
	 * it until the map changes; completion is handled asynchronously in
	 * mrsas_complete_mptmfi_passthru(), which also releases the cmd.
	 */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->mbox.b[0] = num_lds;
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	sc->map_update_cmd = cmd;
	/*
	 * NOTE(review): on failure the cmd is neither released nor is
	 * sc->map_update_cmd cleared — confirm whether that is intentional.
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4462
4463/* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4464  *		dcmd.mbox.s[0]		- deviceId for this physical drive
4465  *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4466  * Desc:	Firmware return the physical drive info structure
4467  *
4468  */
4469static void
4470mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4471{
4472	int retcode;
4473	u_int8_t do_ocr = 1;
4474	struct mrsas_mfi_cmd *cmd;
4475	struct mrsas_dcmd_frame *dcmd;
4476
4477	cmd = mrsas_get_mfi_cmd(sc);
4478
4479	if (!cmd) {
4480		device_printf(sc->mrsas_dev,
4481		    "Cannot alloc for get PD info cmd\n");
4482		return;
4483	}
4484	dcmd = &cmd->frame->dcmd;
4485
4486	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4487	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4488
4489	dcmd->mbox.s[0] = htole16(device_id);
4490	dcmd->cmd = MFI_CMD_DCMD;
4491	dcmd->cmd_status = 0xFF;
4492	dcmd->sge_count = 1;
4493	dcmd->flags = MFI_FRAME_DIR_READ;
4494	dcmd->timeout = 0;
4495	dcmd->pad_0 = 0;
4496	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4497	dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4498	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4499	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
4500
4501	if (!sc->mask_interrupts)
4502		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4503	else
4504		retcode = mrsas_issue_polled(sc, cmd);
4505
4506	if (retcode == ETIMEDOUT)
4507		goto dcmd_timeout;
4508
4509	sc->target_list[device_id].interface_type =
4510		le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
4511
4512	do_ocr = 0;
4513
4514dcmd_timeout:
4515
4516	if (do_ocr)
4517		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4518
4519	if (!sc->mask_interrupts)
4520		mrsas_release_mfi_cmd(cmd);
4521}
4522
4523/*
4524 * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4525 * sc:						Adapter's soft state
4526 * target_id:					Unique target id per controller(managed by driver)
4527 *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4528 *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4529 * return:					void
4530 * Descripton:					This function will be called whenever system PD or VD is created.
4531 */
4532static void mrsas_add_target(struct mrsas_softc *sc,
4533	u_int16_t target_id)
4534{
4535	sc->target_list[target_id].target_id = target_id;
4536
4537	device_printf(sc->mrsas_dev,
4538		"%s created target ID: 0x%x\n",
4539		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4540		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4541	/*
4542	 * If interrupts are enabled, then only fire DCMD to get pd_info
4543	 * for system PDs
4544	 */
4545	if (!sc->mask_interrupts && sc->pd_info_mem &&
4546		(target_id < MRSAS_MAX_PD))
4547		mrsas_get_pd_info(sc, target_id);
4548
4549}
4550
4551/*
4552 * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4553 * sc:						Adapter's soft state
4554 * target_id:					Unique target id per controller(managed by driver)
4555 *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4556 *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4557 * return:					void
4558 * Descripton:					This function will be called whenever system PD or VD is deleted
4559 */
4560static void mrsas_remove_target(struct mrsas_softc *sc,
4561	u_int16_t target_id)
4562{
4563	sc->target_list[target_id].target_id = 0xffff;
4564	device_printf(sc->mrsas_dev,
4565		"%s deleted target ID: 0x%x\n",
4566		(target_id < MRSAS_MAX_PD ? "System PD" : "VD"),
4567		(target_id < MRSAS_MAX_PD ? target_id : (target_id - MRSAS_MAX_PD)));
4568}
4569
4570/*
4571 * mrsas_get_pd_list:           Returns FW's PD list structure input:
4572 * Adapter soft state
4573 *
4574 * Issues an internal command (DCMD) to get the FW's controller PD list
4575 * structure.  This information is mainly used to find out about system
4576 * supported by Firmware.
4577 */
4578static int
4579mrsas_get_pd_list(struct mrsas_softc *sc)
4580{
4581	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4582	u_int8_t do_ocr = 1;
4583	struct mrsas_mfi_cmd *cmd;
4584	struct mrsas_dcmd_frame *dcmd;
4585	struct MR_PD_LIST *pd_list_mem;
4586	struct MR_PD_ADDRESS *pd_addr;
4587	bus_addr_t pd_list_phys_addr = 0;
4588	struct mrsas_tmp_dcmd *tcmd;
4589	u_int16_t dev_id;
4590
4591	cmd = mrsas_get_mfi_cmd(sc);
4592	if (!cmd) {
4593		device_printf(sc->mrsas_dev,
4594		    "Cannot alloc for get PD list cmd\n");
4595		return 1;
4596	}
4597	dcmd = &cmd->frame->dcmd;
4598
4599	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4600	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4601	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4602		device_printf(sc->mrsas_dev,
4603		    "Cannot alloc dmamap for get PD list cmd\n");
4604		mrsas_release_mfi_cmd(cmd);
4605		mrsas_free_tmp_dcmd(tcmd);
4606		free(tcmd, M_MRSAS);
4607		return (ENOMEM);
4608	} else {
4609		pd_list_mem = tcmd->tmp_dcmd_mem;
4610		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4611	}
4612	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4613
4614	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4615	dcmd->mbox.b[1] = 0;
4616	dcmd->cmd = MFI_CMD_DCMD;
4617	dcmd->cmd_status = 0xFF;
4618	dcmd->sge_count = 1;
4619	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4620	dcmd->timeout = 0;
4621	dcmd->pad_0 = 0;
4622	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4623	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4624	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4625	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4626
4627	if (!sc->mask_interrupts)
4628		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4629	else
4630		retcode = mrsas_issue_polled(sc, cmd);
4631
4632	if (retcode == ETIMEDOUT)
4633		goto dcmd_timeout;
4634
4635	/* Get the instance PD list */
4636	pd_count = MRSAS_MAX_PD;
4637	pd_addr = pd_list_mem->addr;
4638	if (le32toh(pd_list_mem->count) < pd_count) {
4639		memset(sc->local_pd_list, 0,
4640		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4641		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4642			dev_id = le16toh(pd_addr->deviceId);
4643			sc->local_pd_list[dev_id].tid = dev_id;
4644			sc->local_pd_list[dev_id].driveType =
4645			    le16toh(pd_addr->scsiDevType);
4646			sc->local_pd_list[dev_id].driveState =
4647			    MR_PD_STATE_SYSTEM;
4648			if (sc->target_list[dev_id].target_id == 0xffff)
4649				mrsas_add_target(sc, dev_id);
4650			pd_addr++;
4651		}
4652		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4653			if ((sc->local_pd_list[pd_index].driveState !=
4654				MR_PD_STATE_SYSTEM) &&
4655				(sc->target_list[pd_index].target_id !=
4656				0xffff)) {
4657				mrsas_remove_target(sc, pd_index);
4658			}
4659		}
4660		/*
4661		 * Use mutext/spinlock if pd_list component size increase more than
4662		 * 32 bit.
4663		 */
4664		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4665		do_ocr = 0;
4666	}
4667dcmd_timeout:
4668	mrsas_free_tmp_dcmd(tcmd);
4669	free(tcmd, M_MRSAS);
4670
4671	if (do_ocr)
4672		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4673
4674	if (!sc->mask_interrupts)
4675		mrsas_release_mfi_cmd(cmd);
4676
4677	return (retcode);
4678}
4679
4680/*
4681 * mrsas_get_ld_list:           Returns FW's LD list structure input:
4682 * Adapter soft state
4683 *
 * Issues an internal command (DCMD) to get the FW's logical drive (LD) list
 * structure.  This information is mainly used to discover the logical drives
 * exported by the FW.
4687 */
4688static int
4689mrsas_get_ld_list(struct mrsas_softc *sc)
4690{
4691	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4692	u_int8_t do_ocr = 1;
4693	struct mrsas_mfi_cmd *cmd;
4694	struct mrsas_dcmd_frame *dcmd;
4695	struct MR_LD_LIST *ld_list_mem;
4696	bus_addr_t ld_list_phys_addr = 0;
4697	struct mrsas_tmp_dcmd *tcmd;
4698
4699	cmd = mrsas_get_mfi_cmd(sc);
4700	if (!cmd) {
4701		device_printf(sc->mrsas_dev,
4702		    "Cannot alloc for get LD list cmd\n");
4703		return 1;
4704	}
4705	dcmd = &cmd->frame->dcmd;
4706
4707	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4708	ld_list_size = sizeof(struct MR_LD_LIST);
4709	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4710		device_printf(sc->mrsas_dev,
4711		    "Cannot alloc dmamap for get LD list cmd\n");
4712		mrsas_release_mfi_cmd(cmd);
4713		mrsas_free_tmp_dcmd(tcmd);
4714		free(tcmd, M_MRSAS);
4715		return (ENOMEM);
4716	} else {
4717		ld_list_mem = tcmd->tmp_dcmd_mem;
4718		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4719	}
4720	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4721
4722	if (sc->max256vdSupport)
4723		dcmd->mbox.b[0] = 1;
4724
4725	dcmd->cmd = MFI_CMD_DCMD;
4726	dcmd->cmd_status = 0xFF;
4727	dcmd->sge_count = 1;
4728	dcmd->flags = MFI_FRAME_DIR_READ;
4729	dcmd->timeout = 0;
4730	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4731	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4732	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4733	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4734	dcmd->pad_0 = 0;
4735
4736	if (!sc->mask_interrupts)
4737		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4738	else
4739		retcode = mrsas_issue_polled(sc, cmd);
4740
4741	if (retcode == ETIMEDOUT)
4742		goto dcmd_timeout;
4743
4744#if VD_EXT_DEBUG
4745	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4746#endif
4747
4748	/* Get the instance LD list */
4749	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4750		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4751		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4752		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4753			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4754			drv_tgt_id = ids + MRSAS_MAX_PD;
4755			if (ld_list_mem->ldList[ld_index].state != 0) {
4756				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4757				if (sc->target_list[drv_tgt_id].target_id ==
4758					0xffff)
4759					mrsas_add_target(sc, drv_tgt_id);
4760			} else {
4761				if (sc->target_list[drv_tgt_id].target_id !=
4762					0xffff)
4763					mrsas_remove_target(sc,
4764						drv_tgt_id);
4765			}
4766		}
4767
4768		do_ocr = 0;
4769	}
4770dcmd_timeout:
4771	mrsas_free_tmp_dcmd(tcmd);
4772	free(tcmd, M_MRSAS);
4773
4774	if (do_ocr)
4775		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4776	if (!sc->mask_interrupts)
4777		mrsas_release_mfi_cmd(cmd);
4778
4779	return (retcode);
4780}
4781
4782/*
4783 * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4784 * Adapter soft state Temp command Size of alloction
4785 *
4786 * Allocates DMAable memory for a temporary internal command. The allocated
4787 * memory is initialized to all zeros upon successful loading of the dma
4788 * mapped memory.
4789 */
4790int
4791mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4792    struct mrsas_tmp_dcmd *tcmd, int size)
4793{
4794	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4795	    1, 0,
4796	    BUS_SPACE_MAXADDR_32BIT,
4797	    BUS_SPACE_MAXADDR,
4798	    NULL, NULL,
4799	    size,
4800	    1,
4801	    size,
4802	    BUS_DMA_ALLOCNOW,
4803	    NULL, NULL,
4804	    &tcmd->tmp_dcmd_tag)) {
4805		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4806		return (ENOMEM);
4807	}
4808	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4809	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4810		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4811		return (ENOMEM);
4812	}
4813	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4814	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4815	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4816		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4817		return (ENOMEM);
4818	}
4819	memset(tcmd->tmp_dcmd_mem, 0, size);
4820	return (0);
4821}
4822
4823/*
4824 * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4825 * temporary dcmd pointer
4826 *
4827 * Deallocates memory of the temporary command for use in the construction of
4828 * the internal DCMD.
4829 */
4830void
4831mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
4832{
4833	if (tmp->tmp_dcmd_phys_addr)
4834		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
4835	if (tmp->tmp_dcmd_mem != NULL)
4836		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
4837	if (tmp->tmp_dcmd_tag != NULL)
4838		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
4839}
4840
4841/*
4842 * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4843 * Adapter soft state Previously issued cmd to be aborted
4844 *
4845 * This function is used to abort previously issued commands, such as AEN and
4846 * RAID map sync map commands.  The abort command is sent as a DCMD internal
4847 * command and subsequently the driver will wait for a return status.  The
4848 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4849 */
4850static int
4851mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4852    struct mrsas_mfi_cmd *cmd_to_abort)
4853{
4854	struct mrsas_mfi_cmd *cmd;
4855	struct mrsas_abort_frame *abort_fr;
4856	u_int8_t retcode = 0;
4857	unsigned long total_time = 0;
4858	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4859
4860	cmd = mrsas_get_mfi_cmd(sc);
4861	if (!cmd) {
4862		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4863		return (1);
4864	}
4865	abort_fr = &cmd->frame->abort;
4866
4867	/* Prepare and issue the abort frame */
4868	abort_fr->cmd = MFI_CMD_ABORT;
4869	abort_fr->cmd_status = 0xFF;
4870	abort_fr->flags = 0;
4871	abort_fr->abort_context = cmd_to_abort->index;
4872	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4873	abort_fr->abort_mfi_phys_addr_hi = 0;
4874
4875	cmd->sync_cmd = 1;
4876	cmd->cmd_status = 0xFF;
4877
4878	if (mrsas_issue_dcmd(sc, cmd)) {
4879		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4880		return (1);
4881	}
4882	/* Wait for this cmd to complete */
4883	sc->chan = (void *)&cmd;
4884	while (1) {
4885		if (cmd->cmd_status == 0xFF) {
4886			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4887		} else
4888			break;
4889		total_time++;
4890		if (total_time >= max_wait) {
4891			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4892			retcode = 1;
4893			break;
4894		}
4895	}
4896
4897	cmd->sync_cmd = 0;
4898	mrsas_release_mfi_cmd(cmd);
4899	return (retcode);
4900}
4901
4902/*
4903 * mrsas_complete_abort:      Completes aborting a command input:
4904 * Adapter soft state Cmd that was issued to abort another cmd
4905 *
4906 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4907 * change after sending the command.  This function is called from
4908 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4909 */
4910void
4911mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4912{
4913	if (cmd->sync_cmd) {
4914		cmd->sync_cmd = 0;
4915		cmd->cmd_status = 0;
4916		sc->chan = (void *)&cmd;
4917		wakeup_one((void *)&sc->chan);
4918	}
4919	return;
4920}
4921
4922/*
4923 * mrsas_aen_handler:	AEN processing callback function from thread context
4924 * input:				Adapter soft state
4925 *
4926 * Asynchronous event handler
4927 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
 	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Do not touch the controller while a detach or reset is in flight. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	/* Decode the event that fired and refresh the affected topology. */
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* PD change: refresh PD list, then rescan the PD sim. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD went away: rescan the LD sim only. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config changes: defer to the full rescan below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	/* Full rescan: refresh both PD and LD lists and both sims. */
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	/* Re-arm the AEN for the event after the one just handled. */
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; nothing more to register. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

	/*
	 * NOTE(review): a failed list refresh jumps here and leaves the AEN
	 * unregistered until the next trigger (e.g. OCR) — confirm intended.
	 */
skip_register_aen:
	return;

}
5027
5028/*
5029 * mrsas_complete_aen:	Completes AEN command
5030 * input:				Adapter soft state
5031 * 						Cmd that was issued to abort another cmd
5032 *
5033 * This function will be called from ISR and will continue event processing from
5034 * thread context by enqueuing task in ev_tq (callback function
5035 * "mrsas_aen_handler").
5036 */
void
mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/*
	 * Don't signal app if it is just an aborted previously registered
	 * aen
	 */
	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
		sc->mrsas_aen_triggered = 1;
		/* Wake any userland poll(2)/select(2) waiter under aen_lock. */
		mtx_lock(&sc->aen_lock);
		if (sc->mrsas_poll_waiting) {
			sc->mrsas_poll_waiting = 0;
			selwakeup(&sc->mrsas_select);
		}
		mtx_unlock(&sc->aen_lock);
	} else
		cmd->abort_aen = 0;

	/* The outstanding AEN command is done; free its slot. */
	sc->aen_cmd = NULL;
	mrsas_release_mfi_cmd(cmd);

	/* Continue event processing in thread context (mrsas_aen_handler). */
	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);

	return;
}
5062
/* newbus device method table: probe/attach/detach and power-event hooks. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_shutdown, mrsas_shutdown),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}
};
5074
/* Driver descriptor: name, method table, and per-instance softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};
5080
static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus; requires the CAM module. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5085