/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *            Copyright 1994-2009 The FreeBSD Project.
 *            All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/eventhandler.h>
#include <sys/callout.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *);
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
    *sc, struct mfi_command *cmd);
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
    *sc, struct mfi_command *mfi_cmd);
void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd);
static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status);
static void mfi_issue_pending_cmds_again(struct mfi_softc *sc);
static void mfi_kill_hba(struct mfi_softc *sc);
static void mfi_process_fw_state_chg_isr(void *arg);
static void mfi_sync_map_complete(struct mfi_command *);
static void mfi_queue_map_sync(struct mfi_softc *sc);

#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)

extern int	mfi_polled_cmd_timeout;
static int	mfi_fw_reset_test = 0;
#ifdef MFI_DEBUG
SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
	   0, "Force a firmware reset condition");
#endif

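/*
 * mfi_tbolt_enable_intr_ppc - Enable controller interrupts
 * @sc:		Adapter soft state
 *
 * Unmask the Fusion reply interrupt in the outbound interrupt mask
 * register; the read back flushes the posted write.
 */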
void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}

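/*
 * mfi_tbolt_disable_intr_ppc - Disable controller interrupts
 * @sc:		Adapter soft state
 *
 * Mask every bit in the outbound interrupt mask register.
 */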
void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}

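/*
 * mfi_tbolt_read_fw_status_ppc - Read the firmware status
 * @sc:		Adapter soft state
 *
 * The firmware reports its state through outbound scratch pad 0.
 */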
int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}

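/*
 * mfi_tbolt_check_clear_intr_ppc - Check for and acknowledge an interrupt
 * @sc:		Adapter soft state
 *
 * Returns 0 if a reply interrupt was pending and has been cleared,
 * MFI_FIRMWARE_STATE_CHANGE if the firmware signalled a state change,
 * and 1 if the interrupt was not ours.
 */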
int32_t
mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status, mfi_status = 0;

	status = MFI_READ4(sc, MFI_OSTS);

	if (status & 1) {
		MFI_WRITE4(sc, MFI_OSTS, status);
		MFI_READ4(sc, MFI_OSTS);
		if (status & MFI_STATE_CHANGE_INTERRUPT) {
			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
		}

		return mfi_status;
	}
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return 1;

	MFI_READ4(sc, MFI_OSTS);
	return 0;
}

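/*
 * mfi_tbolt_issue_cmd_ppc - Post a raw MFI frame to the controller
 * @sc:		Adapter soft state
 * @bus_add:	Bus address of the frame
 * @frame_cnt:	Extra frame count (unused by the Thunderbolt interface)
 *
 * Tags the address as an MFA request descriptor and writes it to the
 * inbound queue port registers, low half first.
 */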
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
   uint32_t frame_cnt)
{
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}

/*
 * mfi_tbolt_adp_reset - For controller reset
 * @sc: Adapter soft state
 */
int
mfi_tbolt_adp_reset(struct mfi_softc *sc)
{
	int retry = 0, i = 0, j = 0;
	int HostDiag;

	MFI_WRITE4(sc, MFI_WSR, 0xF);
	MFI_WRITE4(sc, MFI_WSR, 4);
	MFI_WRITE4(sc, MFI_WSR, 0xB);
	MFI_WRITE4(sc, MFI_WSR, 2);
	MFI_WRITE4(sc, MFI_WSR, 7);
	MFI_WRITE4(sc, MFI_WSR, 0xD);

	for (i = 0; i < 10000; i++);

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);

	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
		for (i = 0; i < 1000; i++);
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 100)
			return 1;
	}

	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);

	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));

	for (i = 0; i < 10; i++) {
		for (j = 0; j < 10000; j++);
	}

	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
	while (HostDiag & DIAG_RESET_ADAPTER) {
		for (i = 0; i < 1000; i++);
		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
		    "hostdiag=%#x\n", retry, HostDiag);

		if (retry++ >= 1000)
			return 1;
	}
	return 0;
}

/*
 * This routine initializes Thunderbolt-specific device information.
 */
void
mfi_tbolt_init_globals(struct mfi_softc *sc)
{
	/* Initialize single reply size and Message size */
	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Calculate how many SGEs are allowed in an allocated main message:
	 * (size of the Message - Raid SCSI IO message size (except SGE))
	 * / size of SGE
	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
	 */
	sc->max_SGEs_in_main_message =
	    (uint8_t)((sc->raid_io_msg_size
	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
	/*
	 * (Command frame size allocated in SRB ext - Raid SCSI IO message
	 * size) / size of SGL
	 * (1280 - 256) / 16 = 64
	 */
	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
	/*
	 * (0x08 - 1) + 0x40 = 0x47 - 0x01 = 0x46; one SGE is left for
	 * command coalescing
	 */
	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
	    + sc->max_SGEs_in_chain_message - 1;
	/*
	 * This is the offset in number of 4 * 32bit words to the next chain
	 * (0x100 - 0x10) / 0x10 = 0xF (15)
	 */
	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
	    - sizeof(MPI2_SGE_IO_UNION)) / 16;
	sc->chain_offset_value_for_mpt_ptmsg
	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16;
	sc->mfi_cmd_pool_tbolt = NULL;
	sc->request_desc_pool = NULL;
}

/*
 * This function calculates the memory requirement for the Thunderbolt
 * controller and returns the total required memory in bytes.
 */
uint32_t
mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
{
	uint32_t size;

	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
	size += sc->reply_size * sc->mfi_max_fw_cmds;
	/* this is for SGL's */
	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
	return size;
}

/*
 * Description:
 *      This function will prepare message pools for the Thunderbolt
 *      controller.
 * Arguments:
 *      DevExt - HBA miniport driver's adapter data storage structure
 *      pMemLocation - start of the memory allocated for Thunderbolt.
 * Return Value:
 *      TRUE if successful
 *      FALSE if failed
 */
int
mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t *mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t     offset = 0;
	uint8_t      *addr = mem_location;

	/* Request Descriptor Base physical Address */

	/* For Request Descriptors Virtual Memory */
	/* Initialise the aligned IO Frames Virtual Memory Pointer */
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip request message pool */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply Frame Pool is initialized */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
		    = (struct mfi_mpi2_reply_header *)addr;

	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip Reply Frame Pool */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* initializing reply address to 0xFFFFFFFF */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	       (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* initialize the last_reply_idx to 0 */
	sc->last_reply_idx = 0;
	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error: initialized more than "
		    "allocated\n");
	return 0;
}

/*
 * This routine prepares and issues the INIT2 frame to the firmware.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
	struct mfi_init_frame		*mfi_init;
	uintptr_t			offset = 0;
	bus_addr_t			phyAddress;
	MFI_ADDRESS			*mfiAddressTemp;
	struct mfi_command		*cm, cmd_tmp;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		device_printf(sc->mfi_dev, "tbolt_init already initialized!\n");
		return 1;
	}

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "tbolt_init failed to get command "
		    "entry!\n");
		return (EBUSY);
	}

	cmd_tmp.cm_frame = cm->cm_frame;
	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
	cmd_tmp.cm_dmamap = cm->cm_dmamap;

	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	mfi_init = &cm->cm_frame->init;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size / 4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
		    MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress = sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		goto out;
	}

	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
		sc->MFA_enabled = 1;
	} else {
		device_printf(sc->mfi_dev, "Init command Failed %#x\n",
		    mfi_init->header.cmd_status);
		error = mfi_init->header.cmd_status;
		goto out;
	}

out:
	cm->cm_frame = cmd_tmp.cm_frame;
	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
	cm->cm_dmamap = cmd_tmp.cm_dmamap;
	mfi_release_command(cm);

	return (error);
}

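/*
 * mfi_tbolt_alloc_cmd - Allocate the Thunderbolt command pool
 * @sc:		Adapter soft state
 *
 * Allocates the request descriptor pool and the array of per-command
 * mfi_cmd_tbolt structures, carves the pre-allocated DMA memory into
 * per-command message and SG chain frames, and places every command on
 * the free list.  Returns 0 on success or ENOMEM on allocation failure.
 */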
int
mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
{
	struct mfi_cmd_tbolt *cmd;
	bus_addr_t io_req_base_phys;
	uint8_t *io_req_base;
	int i = 0, j = 0, offset = 0;

	/*
	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->request_desc_pool = malloc(sizeof(
	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
	    M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->request_desc_pool == NULL) {
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for request_desc_pool\n");
		return (ENOMEM);
	}

	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt *)
	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);

	if (sc->mfi_cmd_pool_tbolt == NULL) {
		free(sc->request_desc_pool, M_MFIBUF);
		device_printf(sc->mfi_dev, "Could not alloc "
		    "memory for cmd_pool_tbolt\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
		    struct mfi_cmd_tbolt), M_MFIBUF, M_NOWAIT|M_ZERO);

		if (!sc->mfi_cmd_pool_tbolt[i]) {
			device_printf(sc->mfi_dev, "Could not alloc "
			    "cmd_pool_tbolt entry\n");

			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);

			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;

			return (ENOMEM);
		}
	}

	/*
	 * The first 256 bytes (SMID 0) are not used.  Don't add them to the
	 * cmd list.
	 */
	io_req_base = sc->request_message_pool_align
		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
	io_req_base_phys = sc->request_msg_busaddr
		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;

	/*
	 * Add all the commands to command pool (instance->cmd_pool).
	 * SMID 0 is reserved, so set the SMID/index from 1.
	 */
	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cmd = sc->mfi_cmd_pool_tbolt[i];
		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
		cmd->index = i + 1;
		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
		    (sc->request_desc_pool + i);
		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
		    (io_req_base + offset);
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
	}
	return 0;
}

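/*
 * mfi_tbolt_reset - Detect a firmware fault and start an adapter reset
 * @sc:		Adapter soft state
 *
 * If the firmware is faulted (or a reset is forced through the
 * hw.mfi.fw_reset_test sysctl on MFI_DEBUG kernels), kick off the first
 * stage of the reset via mfi_process_fw_state_chg_isr().  Returns 0 if
 * a reset was initiated, 1 otherwise.
 */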
int
mfi_tbolt_reset(struct mfi_softc *sc)
{
	uint32_t fw_state;

	mtx_lock(&sc->mfi_io_lock);
	if (sc->hw_crit_error) {
		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
		mtx_unlock(&sc->mfi_io_lock);
		return 1;
	}

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		fw_state = sc->mfi_read_fw_status(sc);
		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
		    mfi_fw_reset_test) {
			if ((sc->disableOnlineCtrlReset == 0)
			    && (sc->adpreset == 0)) {
				device_printf(sc->mfi_dev, "Adapter RESET "
				    "condition is detected\n");
				sc->adpreset = 1;
				sc->issuepend_done = 0;
				sc->MFA_enabled = 0;
				sc->last_reply_idx = 0;
				mfi_process_fw_state_chg_isr((void *)sc);
			}
			mtx_unlock(&sc->mfi_io_lock);
			return 0;
		}
	}
	mtx_unlock(&sc->mfi_io_lock);
	return 1;
}

/*
 * mfi_intr_tbolt - isr entry point
 */
void
mfi_intr_tbolt(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc) == 1) {
		return;
	}
	if (sc->mfi_detaching)
		return;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_complete_cmd(sc);
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);
	return;
}

/*
 * map_tbolt_cmd_status -	Maps FW cmd status to OS cmd status
 * @mfi_cmd:		Pointer to cmd
 * @status:		status of cmd returned by FW
 * @ext_status:		ext status of cmd returned by FW
 */
void
map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
    uint8_t ext_status)
{
	switch (status) {
	case MFI_STAT_OK:
		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
		mfi_cmd->cm_error = MFI_STAT_OK;
		break;

	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_LD_INIT_IN_PROGRESS:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->header.scsi_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.scsi_status
		    = ext_status;
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		mfi_cmd->cm_frame->header.cmd_status = ext_status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;

	default:
		mfi_cmd->cm_frame->header.cmd_status = status;
		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
		break;
	}
}

/*
 * mfi_tbolt_return_cmd -	Return a cmd to free command pool
 * @sc:			Adapter soft state
 * @tbolt_cmd:		Tbolt command packet to be returned to free command pool
 * @mfi_cmd:		Owning MFI command packet
 */
void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
    struct mfi_command *mfi_cmd)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
	mfi_cmd->cm_extra_frames = 0;
	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
}

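/*
 * mfi_tbolt_complete_cmd - Drain the reply descriptor queue
 * @sc:		Adapter soft state
 *
 * Walks the reply frames starting at last_reply_idx, maps each SMID back
 * to its owning MFI command, propagates the completion status, and
 * completes the command.  Consumed descriptors are reset to all ones and
 * the reply post index is written back to the controller.
 */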
void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	desc = (struct mfi_mpi2_reply_header *)
		((uintptr_t)sc->reply_frame_pool_align
		+ sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (reply_desc == NULL) {
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
		return;
	}

	reply_descript_type = reply_desc->ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* Read Reply descriptor */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "smid is %d cannot "
			    "proceed - skipping\n", smid);
			goto next;
		}
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
			device_printf(sc->mfi_dev, "cmd_tbolt %p "
			    "has invalid sync_cmd_idx=%d - skipping\n",
			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
			goto next;
		}
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
			/* polled LD/SYSPD IO command */
			/* XXX mark okay for now DJA */
			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;

		} else {
			/* remove command from busy queue if not polled */
			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
				mfi_remove_busy(cmd_mfi);

			/* complete the command */
			mfi_complete(sc, cmd_mfi);
		}

next:
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}

		/* Set it back to all 0xFF */
		((union mfi_mpi2_reply_descriptor *)desc)->words =
			~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}

/*
 * mfi_tbolt_get_cmd -	Get a command from the free pool
 * @sc:			Adapter soft state
 * @mfi_cmd:		Owning MFI command
 *
 * Returns a free command from the pool
 */
struct mfi_cmd_tbolt *
mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
		return (NULL);
	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
	memset((uint8_t *)cmd->io_request, 0,
	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);

	cmd->sync_cmd_idx = mfi_cmd->cm_index;
	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;

	return cmd;
}

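/*
 * mfi_tbolt_get_request_descriptor - Look up a request descriptor slot
 * @sc:		Adapter soft state
 * @index:	Zero-based descriptor index (SMID - 1)
 *
 * Returns a zeroed descriptor from the pool, or NULL if the index is
 * out of range.
 */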
union mfi_mpi2_request_descriptor *
mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
{
	uint8_t *p;

	if (index >= sc->mfi_max_fw_cmds) {
		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
		    "for descriptor\n", index);
		return NULL;
	}
	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
	    * index;
	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
	return (union mfi_mpi2_request_descriptor *)p;
}

/* Used to build IOCTL cmd */
uint8_t
mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct mfi_mpi2_request_raid_scsi_io *io_req;
	struct mfi_cmd_tbolt *cmd;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (!cmd)
		return EBUSY;
	io_req = cmd->io_request;
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
	    SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;

	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;

	/*
	 * In MFI pass thru, nextChainOffset will always be zero to
	 * indicate the end of the chain.
	 */
	mpi25_ieee_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
		| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	/* setting the length to the maximum length */
	mpi25_ieee_chain->Length = 1024;

	return 0;
}

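/*
 * mfi_tbolt_build_ldio - Translate an MFI LD read/write frame into an
 *			  MPT LD I/O request
 * @sc:		Adapter soft state
 * @mfi_cmd:	MFI command carrying the original I/O frame
 * @cmd:	Thunderbolt command whose io_request is filled in
 */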
void
mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
	struct mfi_mpi2_request_raid_scsi_io	*io_request;
	struct IO_REQUEST_INFO io_info;

	device_id = mfi_cmd->cm_frame->io.header.target_id;
	io_request = cmd->io_request;
	io_request->RaidContext.TargetID = device_id;
	io_request->RaidContext.Status = 0;
	io_request->RaidContext.exStatus = 0;
	io_request->RaidContext.regLockFlags = 0;

	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;

	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
	io_info.ldTgtId = device_id;
	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
	    MFI_FRAME_DIR_READ)
		io_info.isRead = 1;

	io_request->RaidContext.timeoutValue
		= MFI_FUSION_FP_DEFAULT_TIMEOUT;
	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
	io_request->DevHandle = device_id;
	cmd->request_desc->header.RequestFlags
		= (MFI_REQ_DESCRIPT_FLAGS_LD_IO
		   << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
		io_request->RaidContext.RegLockLength = 0x100;
	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
	    * MFI_SECTOR_LEN;
}

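/*
 * mfi_tbolt_build_io - Build the MPT frame for an LD read/write
 * @sc:		Adapter soft state
 * @mfi_cmd:	Originating MFI command
 * @cmd:	Thunderbolt command to fill in
 *
 * Fills in the RAID context, builds the SCSI CDB from the MFI I/O frame
 * and constructs the SGL.  Returns 0 on success, 1 if the command is not
 * an LD read/write or the SGL exceeds the SGE limit.
 */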
int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
    struct mfi_cmd_tbolt *cmd)
{
	struct mfi_mpi2_request_raid_scsi_io *io_request;
	uint32_t sge_count;
	uint8_t cdb_len;
	int readop;
	u_int64_t lba;

	io_request = cmd->io_request;
	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
	      || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
		return 1;

	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);

	/* Convert to SCSI command CDB */
	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		readop = 0;
	else
		readop = 1;

	lba = mfi_cmd->cm_frame->io.lba_hi;
	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
	cdb_len = mfi_build_cdb(readop, 0, lba,
	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);

	/* Just the CDB length, rest of the Flags are zero */
	io_request->IoFlags = cdb_len;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t)&io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL) / 4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;

	return 0;
}

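/*
 * mfi_tbolt_make_sgl - Build the IEEE SGL for an MPT command
 * @sc:		Adapter soft state
 * @mfi_cmd:	MFI command providing the OS scatter/gather list
 * @sgl_ptr:	Start of the SGL inside the main message
 * @cmd:	Thunderbolt command supplying the chain frame
 *
 * Copies the OS SGEs into 64-bit IEEE SGEs, spilling into the command's
 * chain frame when the list does not fit in the main message.  Returns
 * the number of SGEs used.
 */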
static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
		   pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
	uint8_t i, sg_processed;
	uint8_t sge_count, sge_idx;
	union mfi_sgl *os_sgl;
	pMpi25IeeeSgeChain64_t sgl_end;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
		device_printf(sc->mfi_dev, "Buffer empty\n");
		return 0;
	}
	os_sgl = mfi_cmd->cm_sg;
	sge_count = mfi_cmd->cm_frame->header.sg_count;

	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d\n",
		    os_sgl, sge_count);
		return sge_count;
	}

	if (sge_count > sc->max_SGEs_in_main_message)
		/* One element to store the chain info */
		sge_idx = sc->max_SGEs_in_main_message - 1;
	else
		sge_idx = sge_count;

	if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
		sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
		sgl_end->Flags = 0;
	}

	for (i = 0; i < sge_idx; i++) {
		/*
		 * On 32-bit BSD the OS hands us 32-bit SGEs, but the
		 * firmware only takes 64-bit SGEs, so copy from the
		 * 32-bit SGEs to 64-bit ones.
		 */
		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
		} else {
			sgl_ptr->Length = os_sgl->sg32[i].len;
			sgl_ptr->Address = os_sgl->sg32[i].addr;
		}
		if (i == sge_count - 1 &&
		    (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
			sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		else
			sgl_ptr->Flags = 0;
		sgl_ptr++;
		cmd->io_request->ChainOffset = 0;
	}

	sg_processed = i;

	if (sg_processed < sge_count) {
		pMpi25IeeeSgeChain64_t sg_chain;

		cmd->io_request->ChainOffset =
		    sc->chain_offset_value_for_main_message;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
		else
			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
		sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) *
		    (sge_count - sg_processed));
		sg_chain->Address = cmd->sg_frame_phys_addr;
		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
		for (; i < sge_count; i++) {
			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
			} else {
				sgl_ptr->Length = os_sgl->sg32[i].len;
				sgl_ptr->Address = os_sgl->sg32[i].addr;
			}
			if (i == sge_count - 1 &&
			    (sc->mfi_flags &
			    (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
				sgl_ptr->Flags =
				    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			else
				sgl_ptr->Flags = 0;
			sgl_ptr++;
		}
	}
	return sge_count;
}

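/*
 * mfi_build_and_issue_cmd - Build the MPT request for an LD I/O
 * @sc:		Adapter soft state
 * @mfi_cmd:	Originating MFI command
 *
 * Despite the name, this only builds the request; the caller posts the
 * returned descriptor.  Returns NULL on failure.
 */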
union mfi_mpi2_request_descriptor *
mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
{
	struct mfi_cmd_tbolt *cmd;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
	if (cmd == NULL)
		return (NULL);

	index = cmd->index;
	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}

	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
		return (NULL);
	}
	req_desc->header.SMID = index;
	return req_desc;
}

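/*
 * mfi_tbolt_build_mpt_cmd - Wrap an MFI frame in an MPT pass-through
 * @sc:		Adapter soft state
 * @cmd:	MFI command to wrap
 *
 * Returns the request descriptor to post, or NULL on failure.  The SMID
 * was stashed in cm_extra_frames by mfi_tbolt_get_cmd().
 */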
union mfi_mpi2_request_descriptor *
mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
{
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	uint16_t index;

	if (mfi_build_mpt_pass_thru(sc, cmd)) {
		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
		    "cmd\n");
		return NULL;
	}
	/* For fusion the frame_count variable is used as the SMID */
	index = cmd->cm_extra_frames;

	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
	if (req_desc == NULL)
		return NULL;

	bzero(req_desc, sizeof(*req_desc));
	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->header.SMID = index;
	return req_desc;
}

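/*
 * mfi_tbolt_send_frame - Issue a command to the controller
 * @sc:		Adapter soft state
 * @cm:		Command to issue
 *
 * Builds the appropriate MPT request (LD I/O or MFI pass-through), posts
 * it to the inbound queue and, for polled commands, busy-waits up to
 * mfi_polled_cmd_timeout seconds for completion.
 */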
int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	union mfi_mpi2_request_descriptor *req_desc = NULL;
	int tm = mfi_polled_cmd_timeout * 1000;

	hdr = &cm->cm_frame->header;
	if (sc->adpreset)
		return 1;
	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {	/* still get interrupts for it */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
		/* check for inquiry commands coming from CLI */
		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
		    NULL) {
			device_printf(sc->mfi_dev, "Mapping from MFI "
			    "to MPT Failed\n");
			return 1;
		}
	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
		cm->cm_flags |= MFI_CMD_SCSI;
		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "LDIO Failed\n");
			return 1;
		}
	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
		device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
		return (1);
	}

	if (cm->cm_flags & MFI_CMD_SCSI) {
		/*
		 * LD IO needs to be posted since it doesn't get
		 * acknowledged via a status update so have the
		 * controller reply via mfi_tbolt_complete_cmd.
		 */
		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >> 32));

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return 0;

	/*
	 * This is a polled command, so busy-wait for it to complete.
	 *
	 * The value of hdr->cmd_status is updated directly by the hardware
	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
	 * prior to this value changing.
	 */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
		if (cm->cm_flags & MFI_CMD_SCSI) {
			/*
			 * Force a check of the reply queue.
			 * This ensures that dump works correctly.
			 */
			mfi_tbolt_complete_cmd(sc);
		}
	}

	/* ensure the command cleanup has been processed before returning */
	mfi_tbolt_complete_cmd(sc);

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}
	return 0;
}

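/*
 * mfi_issue_pending_cmds_again - Requeue commands outstanding at reset
 * @sc:		Adapter soft state
 *
 * After an adapter reset, walks the busy queue, returns any Thunderbolt
 * resources and requeues the commands for reissue.  A command that has
 * ridden through three resets is assumed to be the cause, and the HBA is
 * taken offline.
 */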
static void
mfi_issue_pending_cmds_again(struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;
	struct mfi_cmd_tbolt *cmd;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
		cm->retry_for_fw_reset++;

		/*
		 * If a command keeps being retried and keeps triggering a
		 * FW reset condition, no further recovery should be
		 * performed on the controller.
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
			    "index=%d was tried multiple times during adapter "
			    "reset - Shutting down the HBA\n", cm, cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		mfi_remove_busy(cm);
		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
			    sc->mfi_max_fw_cmds) {
				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
				mfi_tbolt_return_cmd(sc, cmd, cm);
			} else {
				device_printf(sc->mfi_dev,
				    "Invalid extra_frames: %d detected\n",
				    cm->cm_extra_frames);
			}
		}

		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
			device_printf(sc->mfi_dev,
			    "APJ ****requeue command %p index=%d\n",
			    cm, cm->cm_index);
			mfi_requeue_ready(cm);
		} else
			mfi_release_command(cm);
	}
	mfi_startio(sc);
}

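/*
 * mfi_kill_hba - Take the controller offline
 * @sc:		Adapter soft state
 *
 * Writes MFI_STOP_ADP to the adapter doorbell (register 0x00 on
 * Thunderbolt, MFI_IDB otherwise) so the firmware stops processing.
 */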
static void
mfi_kill_hba(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
	else
		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
}

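/*
 * mfi_process_fw_state_chg_isr - Two-stage adapter reset handler
 * @arg:	Adapter soft state
 *
 * Stage one resets the adapter; stage two waits for the firmware to
 * become ready, reinitializes the MFI queue, requeues outstanding
 * commands and restarts AEN and map-sync processing.
 */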
static void
mfi_process_fw_state_chg_isr(void *arg)
{
	struct mfi_softc *sc = (struct mfi_softc *)arg;
	int error, status;

	if (sc->adpreset == 1) {
		device_printf(sc->mfi_dev, "First stage of FW reset "
		     "initiated...\n");

		sc->mfi_adp_reset(sc);
		sc->mfi_enable_intr(sc);

		device_printf(sc->mfi_dev, "First stage of reset complete, "
		    "second stage initiated...\n");

		sc->adpreset = 2;

		/* wait about 20 seconds before starting the second stage */
		for (int wait = 0; wait < 20000; wait++)
			DELAY(1000);
		device_printf(sc->mfi_dev, "Second stage of FW reset "
		     "initiated...\n");
		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);

		sc->mfi_disable_intr(sc);

		/* We expect the FW state to be READY */
		if (mfi_transition_firmware(sc)) {
			device_printf(sc->mfi_dev, "controller is not in "
			    "ready state\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev, "Failed to initialize MFI "
			    "queue\n");
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		/* Init last reply index and max */
		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

		sc->mfi_enable_intr(sc);
		sc->adpreset = 0;
		if (sc->mfi_aen_cm != NULL) {
			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
			mfi_remove_busy(sc->mfi_aen_cm);
			mfi_release_command(sc->mfi_aen_cm);
			sc->mfi_aen_cm = NULL;
		}

		if (sc->mfi_map_sync_cm != NULL) {
			mfi_remove_busy(sc->mfi_map_sync_cm);
			mfi_release_command(sc->mfi_map_sync_cm);
			sc->mfi_map_sync_cm = NULL;
		}
		mfi_issue_pending_cmds_again(sc);

		/*
		 * Issuing pending commands can result in the adapter being
		 * marked dead because of too many retries.  Check for that
		 * condition before clearing the reset state on the FW.
		 */
		if (!sc->hw_crit_error) {
			/*
			 * Initiate AEN (Asynchronous Event Notification) &
			 * Sync Map
			 */
			mfi_aen_setup(sc, sc->last_seq_num);
			mfi_tbolt_sync_map_info(sc);

			sc->issuepend_done = 1;
			device_printf(sc->mfi_dev, "second stage of reset "
			    "complete, FW is ready now.\n");
		} else {
			device_printf(sc->mfi_dev, "second stage of reset "
			     "never completed, hba was marked offline.\n");
		}
	} else {
		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
		    "called with unhandled value: %d\n", sc->adpreset);
	}
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache, etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN-like command is used to inform the RAID firmware to "sync"
 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns then
 * the driver needs to acknowledge this and "sync" all LD's again.
 * This repeats until we shutdown.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded, etc.  Effectively,
 * this stops all RAID management functions.
 *
 * Doing another LD sync requires the use of an event since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command, but that requires a bunch of structures, and it is simpler
 * to just do MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */

void
mfi_tbolt_sync_map_info(struct mfi_softc *sc)
{
	int error = 0, i;
	struct mfi_command *cmd = NULL;
	struct mfi_dcmd_frame *dcmd = NULL;
	uint32_t context = 0;
	union mfi_ld_ref *ld_sync = NULL;
	size_t ld_size;
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
		return;

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;

	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	ld_size = sizeof(*ld_sync) * list->ld_count;
	ld_sync = (union mfi_ld_ref *)malloc(ld_size, M_MFIBUF,
	    M_NOWAIT | M_ZERO);
	if (ld_sync == NULL) {
		device_printf(sc->mfi_dev, "Failed to allocate sync\n");
		goto out;
	}
	for (i = 0; i < list->ld_count; i++)
		ld_sync[i].ref = list->ld_list[i].ld.ref;

	if ((cmd = mfi_dequeue_free(sc)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to get command\n");
		free(ld_sync, M_MFIBUF);
		goto out;
	}

	context = cmd->cm_frame->header.context;
	bzero(cmd->cm_frame, sizeof(union mfi_frame));
	cmd->cm_frame->header.context = context;

	dcmd = &cmd->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
	dcmd->header.timeout = 0;
	dcmd->header.data_len = ld_size;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
	cmd->cm_sg = &dcmd->sgl;
	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cmd->cm_data = ld_sync;
	cmd->cm_private = ld_sync;

	cmd->cm_len = ld_size;
	cmd->cm_complete = mfi_sync_map_complete;
	sc->mfi_map_sync_cm = cmd;

	cmd->cm_flags = MFI_CMD_DATAOUT;
	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;

	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
		device_printf(sc->mfi_dev, "failed to send map sync\n");
		free(ld_sync, M_MFIBUF);
		sc->mfi_map_sync_cm = NULL;
		mfi_release_command(cmd);
		goto out;
	}

out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}

static void
mfi_sync_map_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	int aborted = 0;

	sc = cm->cm_sc;
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_map_sync_cm == NULL)
		return;

	if (sc->cm_map_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->cm_map_abort = 0;
		aborted = 1;
	}

	free(cm->cm_data, M_MFIBUF);
	wakeup(&sc->mfi_map_sync_cm);
	sc->mfi_map_sync_cm = NULL;
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted)
		mfi_queue_map_sync(sc);
}

static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}

void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *sc;

	sc = context;
	mtx_lock(&sc->mfi_io_lock);
	mfi_tbolt_sync_map_info(sc);
	mtx_unlock(&sc->mfi_io_lock);
}