mfi_tbolt.c revision 261535
/*-
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *            Copyright 1994-2009 The FreeBSD Project.
 *            All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi_tbolt.c 261535 2014-02-06 02:54:04Z markj $");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>

struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
     uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)


extern int	mfi_polled_cmd_timeout;
static int	mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
TUNABLE_INT("hw.mfi.fw_reset_test", &mfi_fw_reset_test);
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
           0, "Force a firmware reset condition");
#endif

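/*
 * Interrupt control: the Fusion (Thunderbolt) reply interrupt is bit 3 of
 * the outbound interrupt mask register.  Writing the complement of
 * MFI_FUSION_ENABLE_INTERRUPT_MASK to MFI_OMSK therefore unmasks only the
 * reply interrupt, while writing all ones masks everything.  The read-back
 * of MFI_OMSK after each write flushes the posted PCI write.
 */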
void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}

void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}

int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT)
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}

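/*
 * Post a legacy MFI frame to the controller.  The low bits of the frame's
 * bus address double as a request descriptor type field: tagging the
 * address with MFI_REQ_DESCRIPT_FLAGS_MFA marks this as a legacy MFA frame
 * rather than a native MPT request.  The 64-bit descriptor is written low
 * word first to MFI_IQPL, then high word to MFI_IQPH.
 */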
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
   uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @regs: MFI register set
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
	int retry = 0, i = 0, j = 0;
	int HostDiag;

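	/*
	 * Write the host diagnostic unlock key sequence (0xF, 4, 0xB, 2, 7,
	 * 0xD) to the write sequence register.  This is the standard MPT
	 * diag unlock sequence; it must complete before the host diagnostic
	 * register will accept a reset request.
	 */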
	MFI_WRITE4(sc, MFI_WSR, 0xF);
	MFI_WRITE4(sc, MFI_WSR, 4);
	MFI_WRITE4(sc, MFI_WSR, 0xB);
	MFI_WRITE4(sc, MFI_WSR, 2);
	MFI_WRITE4(sc, MFI_WSR, 7);
	MFI_WRITE4(sc, MFI_WSR, 0xD);

	for (i = 0; i < 10000; i++)
		;

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);

	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

	for (i = 0; i < 10; i++) {
		for (j = 0; j < 10000; j++)
			;
	}

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		for (i = 0; i < 1000; i++)
			;
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}
	return 0;
}

/*
 * This routine initializes Thunderbolt specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculate how many SGEs are allowed in an allocated main message:
	 * (size of the message - RAID SCSI IO message size (except SGE))
	 * / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
	/*
	 * (Command frame size allocated in SRB ext - RAID SCSI IO message
	 * size) / size of SGL;
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08 - 1) + 0x40 - 1 = 0x46; one SGE in each part is left for
	 * command coalescing (the chain element).
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain:
	 * (0x100 - 0x10) / 0x10 = 0xF (15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION)) / 16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */

uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
	uint32_t size;
	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
	size += sc->reply_size * sc->mfi_max_fw_cmds;
	/* this is for the chain frames (SGLs) */
	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
	return size;
}
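
/*
 * Note: the layout implied here (alignment slack, then mfi_max_fw_cmds + 1
 * request messages, then the reply frames, then the chain/SGL frames) is
 * consumed in exactly this order by mfi_tbolt_init_desc_pool() below; the
 * extra request message slot accounts for the reserved SMID 0.
 */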

/*
 * Description:
 *      This function will prepare message pools for the Thunderbolt
 *      controller.
 * Arguments:
 *      DevExt - HBA miniport driver's adapter data storage structure
 *      pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *      TRUE if successful
 *      FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t     offset = 0;
	uint8_t      *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Descriptors Virtual Memory */
	/* Initialise the aligned IO Frames Virtual Memory Pointer */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
		    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initialize reply addresses to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	       (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error: initialized more than "
		    "allocated\n");
	return 0;
}

/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */

int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
	struct mfi_init_frame		*mfi_init;
	uintptr_t			offset = 0;
	bus_addr_t			phyAddress;
	MFI_ADDRESS			*mfiAddressTemp;
	struct mfi_command		*cm, cmd_tmp;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
		return 1;
	}

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "tbolt_init failed to get command "
		    "entry!\n");
		return (EBUSY);
	}

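	/*
	 * Borrow the per-command frame of `cm' and temporarily point it at
	 * the preallocated Thunderbolt init frame; the original frame, bus
	 * address and DMA map are stashed in cmd_tmp and restored at `out'
	 * before the command is released.
	 */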
	cmd_tmp.cm_frame = cm->cm_frame;
	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
	cmd_tmp.cm_dmamap = cm->cm_dmamap;

	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	mfi_init = &cm->cm_frame->init;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		goto out;
	}

	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
		sc->MFA_enabled = 1;
	} else {
		device_printf(sc->mfi_dev, "Init command failed %#x\n",
		    mfi_init->header.cmd_status);
		error = mfi_init->header.cmd_status;
		goto out;
	}

out:
	cm->cm_frame = cmd_tmp.cm_frame;
	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
	cm->cm_dmamap = cmd_tmp.cm_dmamap;
	mfi_release_command(cm);

	return (error);
}

int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
	struct mfi_cmd_tbolt *cmd;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int i = 0, j = 0, offset = 0;

	/*
	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->request_desc_pool = malloc(sizeof(
	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
	    M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->request_desc_pool == NULL) {
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for request_desc_pool\n");
		return (ENOMEM);
	}

	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->mfi_cmd_pool_tbolt == NULL) {
		free(sc->request_desc_pool, M_MFIBUF);
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for cmd_pool_tbolt\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
		    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

		if (!sc->mfi_cmd_pool_tbolt[i]) {
			device_printf(sc->mfi_dev, "Could not alloc "
			    "cmd_pool_tbolt entry\n");

			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;

			return (ENOMEM);
		}
	}

	/*
	 * The first 256 bytes (SMID 0) are not used.  Don't add them to
	 * the command list.
	 */
	io_req_base = sc->request_message_pool_align
		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	io_req_base_phys = sc->request_msg_busaddr
		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Add all the commands to the command pool.
	 */
	/* SMID 0 is reserved.  Set SMID/index from 1. */

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cmd = sc->mfi_cmd_pool_tbolt[i];
		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
		cmd->index = i + 1;
		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
		    (sc->request_desc_pool + i);
		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
		    (io_req_base + offset);
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
	}
	return 0;
}
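
/*
 * SMID bookkeeping: cmd->index (== array slot + 1) doubles as the MPT SMID,
 * since SMID 0 is reserved by the firmware.  Code that maps a SMID back to
 * a pool entry therefore uses sc->mfi_cmd_pool_tbolt[smid - 1], and request
 * descriptors are looked up with mfi_tbolt_get_request_descriptor(sc,
 * index - 1).  A sync_cmd_idx equal to sc->mfi_max_fw_cmds marks an entry
 * as free, i.e. not bound to any struct mfi_command.
 */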

int
mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		mtx_unlock(&sc->mfi_io_lock);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
		    mfi_fw_reset_test) {
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr((void *)sc);
			}
			mtx_unlock(&sc->mfi_io_lock);
			return 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc) == 1)
		return;
	if (sc->mfi_detaching)
		return;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_complete_cmd(sc);
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);
}

/*
 * map_tbolt_cmd_status -	Maps FW cmd status to OS cmd status
 * @mfi_cmd :		Pointer to cmd
 * @status :		status of cmd returned by FW
 * @ext_status :	ext status of cmd returned by FW
 */

void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
	switch (status) {
	case MFI_STAT_OK:
		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_error = MFI_STAT_OK;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->header.scsi_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.scsi_status
		    = ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		mfi_cmd->cm_frame->header.cmd_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;

	default:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;
	}
}

/*
 * mfi_tbolt_return_cmd -	Return a cmd to the free command pool
 * @sc:			Adapter soft state
 * @tbolt_cmd:		Tbolt command packet to be returned to the free pool
 * @mfi_cmd:		Owning MFI command packet
 */
void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
    struct mfi_command *mfi_cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
	mfi_cmd->cm_extra_frames = 0;
	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
}

void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	desc = (struct mfi_mpi2_reply_header *)
		((uintptr_t)sc->reply_frame_pool_align
		+ sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (reply_desc == NULL) {
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
		return;
	}

	reply_descript_type = reply_desc->ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

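	/*
	 * Walk the reply descriptor ring: each entry consumed below is reset
	 * to all-0xFF bytes so that, once the index wraps, a descriptor that
	 * still reads as ~0 is known to be unused.  The hardware reply post
	 * index (MFI_RPI) is only written back periodically (on wrap and
	 * once at the end) to hand the consumed entries back to firmware.
	 */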
	/* Read the reply descriptors */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "smid is %d, cannot "
			    "proceed - skipping\n", smid);
			goto next;
		}
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "cmd_tbolt %p "
			    "has invalid sync_cmd_idx=%d - skipping\n",
			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
			goto next;
		}
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
			/* polled LD/SYSPD IO command */
			/* XXX mark okay for now DJA */
			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
		} else {
			/* remove command from busy queue if not polled */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
				mfi_remove_busy(cmd_mfi);

			/* complete the command */
			mfi_complete(sc, cmd_mfi);
		}

next:
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}

		/* Set the descriptor back to all 0xFF bytes */
		((union mfi_mpi2_reply_descriptor *)desc)->words =
			~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update the reply index to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
}

/*
 * mfi_tbolt_get_cmd -	Get a command from the free pool
 * @sc:			Adapter soft state
 * @mfi_cmd:		MFI command that will own the Thunderbolt command
 *
 * Returns a free command from the pool
 */

struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
		return (NULL);
	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
	memset((uint8_t *)cmd->io_request, 0,
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);

	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;

	return cmd;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
	uint8_t *p;

	if (index >= sc->mfi_max_fw_cmds) {
		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
		    "for descriptor\n", index);
		return NULL;
	}
	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
	    * index;
	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
	return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (!cmd)
		return EBUSY;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In MFI pass thru, nextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
		| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}

void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io	*io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus = 0;
	io_request->RaidContext.regLockFlags = 0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue
		= MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags
		= (MFI_REQ_DESCRIPT_FLAGS_LD_IO
		   << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
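	/*
	 * Intended to mirror the Linux megaraid_sas check: a 6-byte
	 * READ/WRITE CDB encodes a transfer length of 0 as 256 blocks, so
	 * the region lock length is forced to 0x100 in that case.  (Note
	 * that IoFlags is only filled in later, by mfi_tbolt_build_io(),
	 * after the CDB is constructed.)
	 */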
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}

int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	uint32_t sge_count;
	uint8_t cdb_len;
	int readop;
	u_int64_t lba;

	io_request = cmd->io_request;
	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
	      || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
		return 1;

	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

	/* Convert to SCSI command CDB */
	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		readop = 0;
	else
		readop = 1;

	lba = mfi_cmd->cm_frame->io.lba_hi;
	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
	cdb_len = mfi_build_cdb(readop, 0, lba,
	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

	/* Just the CDB length, rest of the Flags are zero */
	io_request->IoFlags = cdb_len;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t)&io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error: sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

	return 0;
}

static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
		   pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
	uint8_t i, sg_processed, sg_to_process;
	uint8_t sge_count, sge_idx;
	union mfi_sgl *os_sgl;
	pMpi25IeeeSgeChain64_t sgl_end;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
		device_printf(sc->mfi_dev, "Buffer empty\n");
		return 0;
	}
	os_sgl = mfi_cmd->cm_sg;
	sge_count = mfi_cmd->cm_frame->header.sg_count;

	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d\n",
		    os_sgl, sge_count);
		return sge_count;
	}

	if (sge_count > sc->max_SGEs_in_main_message)
		/* One element is reserved to store the chain info */
		sge_idx = sc->max_SGEs_in_main_message - 1;
	else
		sge_idx = sge_count;

	if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
		sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
		sgl_end->Flags = 0;
	}

	for (i = 0; i < sge_idx; i++) {
		/*
		 * On 32-bit BSD the OS hands us 32-bit SGEs, but the
		 * firmware only takes 64-bit SGEs, so copy each 32-bit
		 * SGE into a 64-bit one.
		 */
		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
		} else {
			sgl_ptr->Length = os_sgl->sg32[i].len;
			sgl_ptr->Address = os_sgl->sg32[i].addr;
		}
		if (i == sge_count - 1 &&
		    (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
			sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		else
			sgl_ptr->Flags = 0;
		sgl_ptr++;
		cmd->io_request->ChainOffset = 0;
	}

	sg_processed = i;

	if (sg_processed < sge_count) {
		pMpi25IeeeSgeChain64_t sg_chain;

		sg_to_process = sge_count - sg_processed;
		cmd->io_request->ChainOffset =
		    sc->chain_offset_value_for_main_message;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
		else
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
		sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
		    (sge_count - sg_processed));
		sg_chain->Address = cmd->sg_frame_phys_addr;
		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
		for (; i < sge_count; i++) {
			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
			} else {
				sgl_ptr->Length = os_sgl->sg32[i].len;
				sgl_ptr->Address = os_sgl->sg32[i].addr;
			}
			if (i == sge_count - 1 &&
			    (sc->mfi_flags &
			    (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
				sgl_ptr->Flags =
				    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			else
				sgl_ptr->Flags = 0;
			sgl_ptr++;
		}
	}
	return sge_count;
}
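
/*
 * SGL layout produced above, using the sizes from mfi_tbolt_init_globals():
 * the main message holds up to 8 SGEs; when a request needs more, the 8th
 * slot becomes an IEEE chain element pointing at the command's dedicated
 * chain frame (cmd->sg_frame), which holds the remaining SGEs (up to 64).
 * For example, a 20-element SGL is written as 7 SGEs in the main message,
 * one chain element, and the remaining 13 SGEs in the chain frame.
 */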

union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (cmd == NULL)
		return (NULL);

	index = cmd->index;
	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}

	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}
	req_desc->header.SMID = index;
	return req_desc;
}

union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	if (mfi_build_mpt_pass_thru(sc, cmd)) {
		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
		    "cmd\n");
		return NULL;
	}
	/* For Fusion the frame_count variable is used as the SMID */
	index = cmd->cm_extra_frames;

	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL)
		return NULL;

	bzero(req_desc, sizeof(*req_desc));
	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->header.SMID = index;
	return req_desc;
}

int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {	/* still get interrupts for it */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/*
		 * All PD SCSI commands are mapped to MPT pass-through
		 * frames; a native SYSPD fast path for READ(10)/WRITE(10)
		 * is not implemented.
		 */
		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "Mapping from MFI "
			    "to MPT failed\n");
			return 1;
		}
	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		cm->cm_flags |= MFI_CMD_SCSI;
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO failed\n");
			return 1;
		}
	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
		device_printf(sc->mfi_dev, "Mapping from MFI to MPT failed\n");
		return (1);
	}

	if (cm->cm_flags & MFI_CMD_SCSI) {
		/*
		 * LD IO needs to be posted since it doesn't get
		 * acknowledged via a status update so have the
		 * controller reply via mfi_tbolt_complete_cmd.
		 */
		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 0x20));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/*
	 * This is a polled command, so busy-wait for it to complete.
	 *
	 * The value of hdr->cmd_status is updated directly by the hardware
	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
	 * prior to this value changing.
	 */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
		if (cm->cm_flags & MFI_CMD_SCSI) {
			/*
			 * Force a check of the reply queue.
			 * This ensures that dump works correctly.
			 */
			mfi_tbolt_complete_cmd(sc);
		}
	}

	/* ensure the command cleanup has been processed before returning */
	mfi_tbolt_complete_cmd(sc);

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}

static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;
	struct mfi_cmd_tbolt *cmd;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
		cm->retry_for_fw_reset++;

		/*
		 * If a command has been tried multiple times and keeps
		 * causing a FW reset condition, no further recovery should
		 * be performed on the controller.
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
			    "index=%d was tried multiple times during adapter "
			    "reset - Shutting down the HBA\n", cm, cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		mfi_remove_busy(cm);
		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
			    sc->mfi_max_fw_cmds) {
				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
				mfi_tbolt_return_cmd(sc, cmd, cm);
			} else {
				device_printf(sc->mfi_dev,
				    "Invalid extra_frames: %d detected\n",
				    cm->cm_extra_frames);
			}
		}

		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
			device_printf(sc->mfi_dev,
			    "APJ ****requeue command %p index=%d\n",
			    cm, cm->cm_index);
			mfi_requeue_ready(cm);
		} else
			mfi_release_command(cm);
	}
	mfi_startio(sc);
}

static void
mfi_kill_hba(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
	else
		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}

static void
mfi_process_fw_state_chg_isr(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;
	int error, status;

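	/*
	 * Two-stage adapter reset: stage one (adpreset == 1) performs the
	 * hard reset and re-enables interrupts; stage two waits for the
	 * diagnostic reset bit to clear, brings the firmware back to the
	 * READY state, reposts the IOC INIT, and then reissues any commands
	 * that were in flight when the fault was detected.
	 */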
	if (sc->adpreset == 1) {
		device_printf(sc->mfi_dev, "First stage of FW reset "
		    "initiated...\n");

		sc->mfi_adp_reset(sc);
		sc->mfi_enable_intr(sc);

		device_printf(sc->mfi_dev, "First stage of reset complete, "
		    "second stage initiated...\n");

		sc->adpreset = 2;

		/* wait about 20 seconds before starting the second init */
		for (int wait = 0; wait < 20000; wait++)
			DELAY(1000);
		device_printf(sc->mfi_dev, "Second stage of FW reset "
		    "initiated...\n");
		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04)
			;

		sc->mfi_disable_intr(sc);

		/* We expect the FW state to be READY */
		if (mfi_transition_firmware(sc)) {
			device_printf(sc->mfi_dev, "controller is not in "
			    "ready state\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev, "Failed to initialise MFI "
			    "queue\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		/* Init last reply index and max */
		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

		sc->mfi_enable_intr(sc);
		sc->adpreset = 0;
		if (sc->mfi_aen_cm != NULL) {
			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
			mfi_remove_busy(sc->mfi_aen_cm);
			mfi_release_command(sc->mfi_aen_cm);
			sc->mfi_aen_cm = NULL;
		}

		if (sc->mfi_map_sync_cm != NULL) {
			mfi_remove_busy(sc->mfi_map_sync_cm);
			mfi_release_command(sc->mfi_map_sync_cm);
			sc->mfi_map_sync_cm = NULL;
		}
		mfi_issue_pending_cmds_again(sc);

		/*
		 * Issuing the pending commands can result in the adapter
		 * being marked dead because of too many retries.  Check for
		 * that condition before clearing the reset condition on
		 * the FW.
		 */
		if (!sc->hw_crit_error) {
			/*
			 * Initiate AEN (Asynchronous Event Notification) &
			 * Sync Map
			 */
			mfi_aen_setup(sc, sc->last_seq_num);
			mfi_tbolt_sync_map_info(sc);

			sc->issuepend_done = 1;
			device_printf(sc->mfi_dev, "second stage of reset "
			    "complete, FW is ready now.\n");
		} else {
			device_printf(sc->mfi_dev, "second stage of reset "
			    "never completed, hba was marked offline.\n");
		}
	} else {
		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
		    "called with unhandled value:%d\n", sc->adpreset);
	}
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN like command is used to inform the RAID firmware to "sync"
 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  An example of this type
 * of change is removing a disk.  Once the command returns, the
 * driver needs to acknowledge this and "sync" all LD's again.
 * This repeats until we shutdown.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right, the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc.  Effectively,
 * it stops any RAID management functions from working.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a bunch of structures, and it is simpler
 * to just do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */

void
mfi_tbolt_sync_map_info(struct mfi_softc *sc)
{
	int error = 0, i;
	struct mfi_command *cmd = NULL;
	struct mfi_dcmd_frame *dcmd = NULL;
	uint32_t context = 0;
	union mfi_ld_ref *ld_sync = NULL;
	size_t ld_size;
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
		return;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;

	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	ld_size = sizeof(*ld_sync) * list->ld_count;
	ld_sync = (union mfi_ld_ref *)malloc(ld_size, M_MFIBUF,
	    M_NOWAIT | M_ZERO);
	if (ld_sync == NULL) {
		device_printf(sc->mfi_dev, "Failed to allocate sync\n");
		goto out;
	}
	for (i = 0; i < list->ld_count; i++)
		ld_sync[i].ref = list->ld_list[i].ld.ref;

	if ((cmd = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to get command\n");
		free(ld_sync, M_MFIBUF);
		goto out;
	}

	context = cmd->cm_frame->header.context;
	bzero(cmd->cm_frame, sizeof(union mfi_frame));
	cmd->cm_frame->header.context = context;

	dcmd = &cmd->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = ld_size;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
	cmd->cm_sg = &dcmd->sgl;
	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cmd->cm_data = ld_sync;
	cmd->cm_private = ld_sync;

	cmd->cm_len = ld_size;
	cmd->cm_complete = mfi_sync_map_complete;
	sc->mfi_map_sync_cm = cmd;

	cmd->cm_flags = MFI_CMD_DATAOUT;
	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;

	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
		device_printf(sc->mfi_dev, "failed to send map sync\n");
		free(ld_sync, M_MFIBUF);
		sc->mfi_map_sync_cm = NULL;
		mfi_release_command(cmd);
		goto out;
	}

out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

static void
mfi_sync_map_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	int aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_map_sync_cm == NULL)
		return;

	if (sc->cm_map_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_map_abort = 0;
		aborted = 1;
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_map_sync_cm);
	sc->mfi_map_sync_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_queue_map_sync(sc);
}

static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *sc;

	sc = context;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_sync_map_info(sc);
	mtx_unlock(&sc->mfi_io_lock);
}