/* mfi_tbolt.c revision 235014 */
1 /*-
2 * Redistribution and use in source and binary forms, with or without
3 * modification, are permitted provided that the following conditions
4 * are met:
5 *
6 *            Copyright 1994-2009 The FreeBSD Project.
7 *            All rights reserved.
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
 15 *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * The views and conclusions contained in the software and documentation
28 * are those of the authors and should not be interpreted as representing
 29 * official policies, either expressed or implied, of the FreeBSD Project.
30 */
31
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/mfi/mfi_tbolt.c 235014 2012-05-04 16:00:39Z ambrisko $");
35
36#include "opt_mfi.h"
37
38#include <sys/param.h>
39#include <sys/types.h>
40#include <sys/kernel.h>
41#include <sys/selinfo.h>
42#include <sys/bus.h>
43#include <sys/conf.h>
44#include <sys/bio.h>
45#include <sys/ioccom.h>
46#include <sys/eventhandler.h>
47#include <sys/callout.h>
48#include <sys/uio.h>
49#include <machine/bus.h>
50#include <sys/sysctl.h>
51#include <sys/systm.h>
52#include <sys/malloc.h>
53
54#include <dev/mfi/mfireg.h>
55#include <dev/mfi/mfi_ioctl.h>
56#include <dev/mfi/mfivar.h>
57
58struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc);
59union mfi_mpi2_request_descriptor *
60mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
61void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
62int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
63    struct mfi_cmd_tbolt *cmd);
64static inline void mfi_tbolt_return_cmd(struct mfi_softc *sc,
65    struct mfi_cmd_tbolt *cmd);
66union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
67    *sc, struct mfi_command *cmd);
68uint8_t
69mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
70union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
71    *sc, struct mfi_command *mfi_cmd);
72int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd);
73void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
74    struct mfi_cmd_tbolt *cmd);
75static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
76    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
77static int mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command
78    *mfi_cmd, uint8_t *cdb);
79void
80map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
81     uint8_t ext_status);
82static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
83static void mfi_kill_hba (struct mfi_softc *sc);
84static void mfi_process_fw_state_chg_isr(void *arg);
85static void mfi_sync_map_complete(struct mfi_command *);
86static void mfi_queue_map_sync(struct mfi_softc *sc);
87
88#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)
89
void
mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
{
	/*
	 * Unmask only the Thunderbolt reply interrupt in the outbound
	 * interrupt mask register, then read the register back
	 * (presumably to flush the posted write -- standard MMIO idiom).
	 */
	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
	MFI_READ4(sc, MFI_OMSK);
}
96
void
mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
{
	/*
	 * Mask every bit in the outbound interrupt mask register, then
	 * read it back (presumably to flush the posted write).
	 */
	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
	MFI_READ4(sc, MFI_OMSK);
}
103
/* Return the raw firmware status word from outbound scratch pad 0. */
int32_t
mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
109
110int32_t
111mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
112{
113	int32_t status, mfi_status = 0;
114
115	status = MFI_READ4(sc, MFI_OSTS);
116
117	if (status & 1) {
118		MFI_WRITE4(sc, MFI_OSTS, status);
119		MFI_READ4(sc, MFI_OSTS);
120		if (status & MFI_STATE_CHANGE_INTERRUPT) {
121			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
122		}
123
124		return mfi_status;
125	}
126	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
127		return 1;
128
129	MFI_READ4(sc, MFI_OSTS);
130	return 0;
131}
132
133
/*
 * Post a command descriptor to the firmware inbound queue.
 * frame_cnt is currently unused by this function.
 */
void
mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
   uint32_t frame_cnt)
{
	/* Tag the descriptor as an MFA-type frame. */
	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	/* Low half first, then high half, as the code is written here. */
	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
}
143
144/**
145 * mfi_tbolt_adp_reset - For controller reset
146 * @regs: MFI register set
147 */
148int mfi_tbolt_adp_reset(struct mfi_softc *sc)
149{
150	int retry = 0, i = 0;
151	int HostDiag;
152
153	MFI_WRITE4(sc, MFI_WSR, 0xF);
154	MFI_WRITE4(sc, MFI_WSR, 4);
155	MFI_WRITE4(sc, MFI_WSR, 0xB);
156	MFI_WRITE4(sc, MFI_WSR, 2);
157	MFI_WRITE4(sc, MFI_WSR, 7);
158	MFI_WRITE4(sc, MFI_WSR, 0xD);
159
160	for (i = 0; i < 10000; i++) ;
161
162	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
163
164	while (!( HostDiag & DIAG_WRITE_ENABLE)) {
165		for (i = 0; i < 1000; i++);
166		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
167		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
168		    "hostdiag=%x\n", retry, HostDiag);
169
170		if (retry++ >= 100)
171			return 1;
172	}
173
174	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%x\n", HostDiag);
175
176	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
177
178	for (i=0; i < 10; i++) {
179		for (i = 0; i < 10000; i++);
180	}
181
182	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
183	while (HostDiag & DIAG_RESET_ADAPTER) {
184		for (i = 0; i < 1000; i++) ;
185		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
186		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%x, "
187		    "hostdiag=%x\n", retry, HostDiag);
188
189		if (retry++ >= 1000)
190			return 1;
191	}
192	return 0;
193}
194
195/*
196 *******************************************************************************************
197 * Description:
198 *      This routine initialize Thunderbolt specific device information
199 *******************************************************************************************
200 */
201void mfi_tbolt_init_globals(struct mfi_softc *sc)
202{
203	/* Initialize single reply size and Message size */
204	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
205	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
206
207	/*
208	 * Calculating how many SGEs allowed in a allocated main message
209	 * (size of the Message - Raid SCSI IO message size(except SGE))
210	 * / size of SGE
211	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
212	 */
213	sc->max_SGEs_in_main_message =
214	    (uint8_t)((sc->raid_io_msg_size
215	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
216	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
217	/*
218	 * (Command frame size allocaed in SRB ext - Raid SCSI IO message size)
219	 * / size of SGL ;
220	 * (1280 - 256) / 16 = 64
221	 */
222	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
223	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
224	/*
225	 * (0x08-1) + 0x40 = 0x47 - 0x01 = 0x46  one is left for command
226	 * colscing
227	*/
228	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
229	    + sc->max_SGEs_in_chain_message - 1;
230	/*
231	* This is the offset in number of 4 * 32bit words to the next chain
232	* (0x100 - 0x10)/0x10 = 0xF(15)
233	*/
234	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
235	    - sizeof(MPI2_SGE_IO_UNION))/16;
236	sc->chain_offset_value_for_mpt_ptmsg
237	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
238	sc->mfi_cmd_pool_tbolt = NULL;
239	sc->request_desc_pool = NULL;
240}
241
242/*
243 ****************************************************************************
244 * Description:
245 *      This function calculates the memory requirement for Thunderbolt
246 *      controller
247 * Return Value:
248 *      Total required memory in bytes
249 ****************************************************************************
250 */
251
252uint32_t mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
253{
254	uint32_t size;
255	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
256	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
257	size += sc->reply_size * sc->mfi_max_fw_cmds;
258	/* this is for SGL's */
259	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
260	return size;
261}
262
263/*
264 ****************************************************************************
265 * Description:
266 *      This function will prepare message pools for the Thunderbolt controller
267 * Arguments:
268 *      DevExt - HBA miniport driver's adapter data storage structure
269 *      pMemLocation - start of the memory allocated for Thunderbolt.
270 * Return Value:
271 *      TRUE if successful
272 *      FALSE if failed
273 ****************************************************************************
274 */
/*
 * Carve the contiguous DMA area (mem_location, tbolt_contg_length bytes)
 * into the request message pool, the reply frame pool and the SG chain
 * frames, recording the aligned virtual and bus addresses in the softc.
 * Always returns 0; over-consumption only logs an error.
 */
int mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
    uint32_t tbolt_contg_length)
{
	uint32_t     offset = 0;
	uint8_t      *addr = mem_location;

	/* Round the request message pool up to a 256-byte boundary. */
	if (((uintptr_t)addr) & (0xFF)) {
		/*
		 * NOTE(review): stepping forward one message then masking
		 * down only rounds *up* if raid_io_msg_size >= 0x100 --
		 * confirm against MEGASAS_THUNDERBOLT_NEW_MSG_SIZE.
		 */
		addr = &addr[sc->raid_io_msg_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
		sc->request_message_pool_align = addr;
	} else
		sc->request_message_pool_align = addr;

	/* Bus address of the aligned request message pool. */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;

	/* DJA XXX should this be bus dma ??? */
	/* Skip the request message pool (one extra frame for SMID 0). */
	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
	/* Reply frame pool starts here; align it to 256 bytes as well. */
	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
	if (((uintptr_t)addr) & (0xFF)) {
		addr = &addr[sc->reply_size];
		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
	}
	sc->reply_frame_pool_align
		    = (struct mfi_mpi2_reply_header *)addr;

	/* Bus address of the aligned reply frame pool. */
	offset = (uintptr_t)sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;

	/* Skip past the reply frames; SG chain frames follow. */
	addr += sc->reply_size * sc->mfi_max_fw_cmds;
	sc->reply_pool_limit = addr;

	/* Mark every reply descriptor unused (all bytes 0xFF). */
	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
	       (sc->reply_size * sc->mfi_max_fw_cmds));

	offset = sc->reply_size * sc->mfi_max_fw_cmds;
	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
	/* Start consuming replies from index 0. */
	sc->last_reply_idx = 0;

	/* Sanity check: the carved regions must fit the allocation. */
	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
	if (offset > tbolt_contg_length)
		device_printf(sc->mfi_dev, "Error:Initialized more than "
		    "allocated\n");
	return 0;
}
330
331/*
332 ****************************************************************************
333 * Description:
334 *   This routine prepare and issue INIT2 frame to the Firmware
335 ****************************************************************************
336 */
337
/*
 * Build and send the MPI2 IOC INIT request that hands the firmware the
 * request/reply pool layout.  On success sets sc->MFA_enabled.
 *
 * Returns 1 if already initialized or if the firmware rejects the
 * command, EBUSY if no free MFI command is available, or the error from
 * mfi_mapcmd().  Takes and releases sc->mfi_io_lock internally.
 */
int
mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
{
	struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
	struct mfi_init_frame	*mfi_init;
	uintptr_t			offset = 0;
	bus_addr_t			phyAddress;
	MFI_ADDRESS			*mfiAddressTemp;
	struct mfi_command *cm;
	int error;

	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
	/* Check if initialization is already completed */
	if (sc->MFA_enabled) {
		return 1;
	}

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}
	/* Repoint the borrowed command at the dedicated init frame/DMA map. */
	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
	cm->cm_frame->header.context = 0;
	cm->cm_sc = sc;
	cm->cm_index = 0;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	mfi_init = &cm->cm_frame->init;

	/* Fill in the MPI2 IOC INIT descriptor the frame will point at. */
	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
	mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
	mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;

	/* set MsgVersion and HeaderVersion host driver was built with */
	mpi2IocInit->MsgVersion = MPI2_VERSION;
	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
	mpi2IocInit->ReplyDescriptorPostQueueDepth
	    = (uint16_t)sc->mfi_max_fw_cmds;
	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */

	/* Get physical address of reply frame pool */
	offset = (uintptr_t) sc->reply_frame_pool_align
	    - (uintptr_t)sc->request_message_pool;
	phyAddress = sc->mfi_tb_busaddr + offset;
	mfiAddressTemp =
	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);

	/* Get physical address of request message pool */
	offset = sc->request_message_pool_align - sc->request_message_pool;
	phyAddress =  sc->mfi_tb_busaddr + offset;
	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
	mpi2IocInit->ReplyFreeQueueAddress =  0; /* Not supported by MR. */
	mpi2IocInit->TimeStamp = time_uptime;

	/* Pass the driver version string's bus address, if allocated. */
	if (sc->verbuf) {
		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
                MEGASAS_VERSION);
		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
		mfi_init->driver_ver_hi =
		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
	}
	/* Get the physical address of the mpi2 ioc init command */
	phyAddress =  sc->mfi_tb_ioc_init_busaddr;
	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_init->header.cmd = MFI_CMD_INIT;
	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;

	/* Issue the frame polled and wait for completion. */
	cm->cm_data = NULL;
	cm->cm_flags |= MFI_CMD_POLLED;
	cm->cm_timestamp = time_uptime;
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send IOC init2 "
		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
		mfi_release_command(cm);
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	/* cmd_status 0 means the firmware accepted the init frame. */
	if (mfi_init->header.cmd_status == 0) {
		sc->MFA_enabled = 1;
	}
	else {
		device_printf(sc->mfi_dev, "Init command Failed %x\n",
		    mfi_init->header.cmd_status);
		return 1;
	}

	return 0;

}
445
446int mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
447{
448	struct mfi_cmd_tbolt *cmd;
449	bus_addr_t io_req_base_phys;
450	uint8_t *io_req_base;
451	int i = 0, j = 0, offset = 0;
452
453	/*
454	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
455	 * Allocate the dynamic array first and then allocate individual
456	 * commands.
457	 */
458	sc->request_desc_pool = malloc(sizeof(
459	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
460	    M_MFIBUF, M_NOWAIT|M_ZERO);
461	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
462	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
463
464	if (!sc->mfi_cmd_pool_tbolt) {
465		device_printf(sc->mfi_dev, "out of memory. Could not alloc "
466		    "memory for cmd_list_fusion\n");
467		return 1;
468	}
469
470	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
471		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
472		    struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
473
474		if (!sc->mfi_cmd_pool_tbolt[i]) {
475			device_printf(sc->mfi_dev, "Could not alloc cmd list "
476			    "fusion\n");
477
478			for (j = 0; j < i; j++)
479				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
480
481			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
482			sc->mfi_cmd_pool_tbolt = NULL;
483		}
484	}
485
486	/*
487	 * The first 256 bytes (SMID 0) is not used. Don't add to the cmd
488	 *list
489	 */
490	io_req_base = sc->request_message_pool_align
491		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
492	io_req_base_phys = sc->request_msg_busaddr
493		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
494
495	/*
496	 * Add all the commands to command pool (instance->cmd_pool)
497	 */
498	/* SMID 0 is reserved. Set SMID/index from 1 */
499
500	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
501		cmd = sc->mfi_cmd_pool_tbolt[i];
502		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
503		cmd->index = i + 1;
504		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
505		    (sc->request_desc_pool + i);
506		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
507		    (io_req_base + offset);
508		cmd->io_request_phys_addr = io_req_base_phys + offset;
509		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
510		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
511		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
512		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
513
514		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
515	}
516	return 0;
517}
518
519int mfi_tbolt_reset(struct mfi_softc *sc)
520{
521	uint32_t fw_state;
522
523	mtx_lock(&sc->mfi_io_lock);
524	if (sc->hw_crit_error) {
525		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
526		mtx_unlock(&sc->mfi_io_lock);
527		return 1;
528	}
529
530	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
531		fw_state = sc->mfi_read_fw_status(sc);
532		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT) {
533			if ((sc->disableOnlineCtrlReset == 0)
534			    && (sc->adpreset == 0)) {
535				device_printf(sc->mfi_dev, "Adapter RESET "
536				    "condition is detected\n");
537				sc->adpreset = 1;
538				sc->issuepend_done = 0;
539				sc->MFA_enabled = 0;
540				sc->last_reply_idx = 0;
541				mfi_process_fw_state_chg_isr((void *) sc);
542			}
543			mtx_unlock(&sc->mfi_io_lock);
544			return 0;
545		}
546	}
547	mtx_unlock(&sc->mfi_io_lock);
548	return 1;
549}
550
551/*
552 * mfi_intr_tbolt - isr entry point
553 */
554void mfi_intr_tbolt(void *arg)
555{
556	struct mfi_softc *sc = (struct mfi_softc *)arg;
557
558	if (sc->mfi_check_clear_intr(sc) == 1) {
559		return;
560	}
561	if (sc->mfi_detaching)
562		return;
563	mtx_lock(&sc->mfi_io_lock);
564	mfi_tbolt_complete_cmd(sc);
565	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
566		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
567	mfi_startio(sc);
568	mtx_unlock(&sc->mfi_io_lock);
569	return;
570}
571
572/**
573 * map_cmd_status -	Maps FW cmd status to OS cmd status
574 * @cmd :		Pointer to cmd
575 * @status :		status of cmd returned by FW
576 * @ext_status :	ext status of cmd returned by FW
577 */
578
579void
580map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
581    uint8_t ext_status)
582{
583
584	switch (status) {
585
586		case MFI_STAT_OK:
587			mfi_cmd->cm_frame->header.cmd_status = 0;
588			mfi_cmd->cm_frame->dcmd.header.cmd_status = 0;
589			break;
590
591		case MFI_STAT_SCSI_IO_FAILED:
592		case MFI_STAT_LD_INIT_IN_PROGRESS:
593			mfi_cmd->cm_frame->header.cmd_status = status;
594			mfi_cmd->cm_frame->header.scsi_status = ext_status;
595			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
596			mfi_cmd->cm_frame->dcmd.header.scsi_status
597			    = ext_status;
598			break;
599
600		case MFI_STAT_SCSI_DONE_WITH_ERROR:
601			mfi_cmd->cm_frame->header.cmd_status = ext_status;
602			mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
603			break;
604
605		case MFI_STAT_LD_OFFLINE:
606		case MFI_STAT_DEVICE_NOT_FOUND:
607			mfi_cmd->cm_frame->header.cmd_status = status;
608			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
609			break;
610
611		default:
612			mfi_cmd->cm_frame->header.cmd_status = status;
613			mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
614			break;
615		}
616}
617
618/**
619 * mfi_tbolt_return_cmd -	Return a cmd to free command pool
620 * @instance:		Adapter soft state
621 * @cmd:		Command packet to be returned to free command pool
622 */
static inline void
mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *cmd)
{
	/* Caller must hold the io lock; append to the tail of the free list. */
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, cmd, next);
}
630
/*
 * Drain the reply descriptor ring: for every posted reply, map the
 * firmware status onto the MFI command, complete it, return the
 * Thunderbolt command to the free pool, mark the descriptor unused
 * (all 0xFF) and advance last_reply_idx, notifying the firmware via
 * the reply post index register.  Caller holds sc->mfi_io_lock
 * (mfi_intr_tbolt takes it before calling here).
 */
void
mfi_tbolt_complete_cmd(struct mfi_softc *sc)
{
	struct mfi_mpi2_reply_header *desc, *reply_desc;
	struct mfi_command *cmd_mfi, *cmd_mfi_check;	/* For MFA Cmds */
	struct mfi_cmd_tbolt *cmd_tbolt;
	uint16_t smid;
	uint8_t reply_descript_type;
	struct mfi_mpi2_request_raid_scsi_io  *scsi_io_req;
	uint32_t status, extStatus;
	uint16_t num_completed;
	union desc_value val;

	/* Current consumer position in the reply ring. */
	desc = (struct mfi_mpi2_reply_header *)
		((uintptr_t)sc->reply_frame_pool_align
		+ sc->last_reply_idx * sc->reply_size);
	reply_desc = desc;

	if (!reply_desc)
		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");

	reply_descript_type = reply_desc->ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return;

	num_completed = 0;
	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;

	/* A descriptor of all-ones marks an unconsumed/empty slot. */
	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
		smid = reply_desc->SMID;
		/*
		 * NOTE(review): valid SMIDs appear to be 1..mfi_max_fw_cmds
		 * (see cmd->index = i + 1 in mfi_tbolt_alloc_cmd), yet this
		 * bound admits smid == mfi_max_fw_cmds + 1, which would
		 * index one past mfi_cmd_pool_tbolt[] below -- confirm.
		 */
		if (!smid || smid > sc->mfi_max_fw_cmds + 1) {
			device_printf(sc->mfi_dev, "smid is %x. Cannot "
			    "proceed. Returning \n", smid);
			return;
		}

		/* SMID N maps to pool slot N-1; recover the MFI command. */
		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
		scsi_io_req = cmd_tbolt->io_request;

		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
		map_tbolt_cmd_status(cmd_mfi, status, extStatus);

		/* remove command from busy queue if not polled */
		TAILQ_FOREACH(cmd_mfi_check, &sc->mfi_busy, cm_link) {
			if (cmd_mfi_check == cmd_mfi) {
				mfi_remove_busy(cmd_mfi);
				break;
			}
		}
		cmd_mfi->cm_error = 0;
		mfi_complete(sc, cmd_mfi);
		mfi_tbolt_return_cmd(sc, cmd_tbolt);

		/* Advance the ring; tell the firmware when we wrap. */
		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
			sc->last_reply_idx = 0;
		}
		/* Mark the consumed descriptor unused again (all 0xFF). */
		((union mfi_mpi2_reply_descriptor*)desc)->words =
			~((uint64_t)0x00);

		num_completed++;

		/* Get the next reply descriptor */
		desc = (struct mfi_mpi2_reply_header *)
		    ((uintptr_t)sc->reply_frame_pool_align
		    + sc->last_reply_idx * sc->reply_size);
		reply_desc = desc;
		val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
		reply_descript_type = reply_desc->ReplyFlags
		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
	}

	if (!num_completed)
		return;

	/* update replyIndex to FW */
	if (sc->last_reply_idx)
		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);

	return;
}
720
721/**
722 * mfi_get_cmd -	Get a command from the free pool
723 * @instance:		Adapter soft state
724 *
725 * Returns a free command from the pool
726 */
727
728struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc
729						  *sc)
730{
731	struct mfi_cmd_tbolt *cmd = NULL;
732
733	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
734
735	cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh);
736	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
737	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
738	memset((uint8_t *)cmd->io_request, 0,
739	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
740	return cmd;
741}
742
743union mfi_mpi2_request_descriptor *
744mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
745{
746	uint8_t *p;
747
748	if (index >= sc->mfi_max_fw_cmds) {
749		device_printf(sc->mfi_dev, "Invalid SMID (0x%x)request "
750		    "for descriptor\n", index);
751		return NULL;
752	}
753	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
754	    * index;
755	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
756	return (union mfi_mpi2_request_descriptor *)p;
757}
758
759
760/* Used to build IOCTL cmd */
761uint8_t
762mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
763{
764	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
765	struct mfi_mpi2_request_raid_scsi_io *io_req;
766	struct mfi_cmd_tbolt *cmd;
767
768	cmd = mfi_tbolt_get_cmd(sc);
769	if (!cmd)
770		return EBUSY;
771	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
772	cmd->sync_cmd_idx = mfi_cmd->cm_index;
773	io_req = cmd->io_request;
774	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
775
776	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
777	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
778	    SGL) / 4;
779	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
780
781	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
782
783	/*
784	  In MFI pass thru, nextChainOffset will always be zero to
785	  indicate the end of the chain.
786	*/
787	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
788		| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
789
790	/* setting the length to the maximum length */
791	mpi25_ieee_chain->Length = 1024;
792
793	return 0;
794}
795
796void
797mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
798    struct mfi_cmd_tbolt *cmd)
799{
800	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
801	struct mfi_mpi2_request_raid_scsi_io	*io_request;
802	struct IO_REQUEST_INFO io_info;
803
804	device_id = mfi_cmd->cm_frame->io.header.target_id;
805	io_request = cmd->io_request;
806	io_request->RaidContext.TargetID = device_id;
807	io_request->RaidContext.Status = 0;
808	io_request->RaidContext.exStatus =0;
809
810	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
811	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
812
813	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
814	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
815	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
816	io_info.ldTgtId = device_id;
817	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
818	    MFI_FRAME_DIR_READ)
819		io_info.isRead = 1;
820
821		io_request->RaidContext.timeoutValue
822		     = MFI_FUSION_FP_DEFAULT_TIMEOUT;
823		io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
824		io_request->DevHandle = device_id;
825		cmd->request_desc->header.RequestFlags
826		    = (MFI_REQ_DESCRIPT_FLAGS_LD_IO
827		    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
828	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
829		io_request->RaidContext.RegLockLength = 0x100;
830	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
831	    * MFI_SECTOR_LEN;
832}
833
834int mfi_tbolt_is_ldio(struct mfi_command *mfi_cmd)
835{
836	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
837	    || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
838		return 1;
839	else
840		return 0;
841}
842
/*
 * Translate an MFI logical-drive read/write frame into a full
 * Thunderbolt SCSI IO request: build the CDB, fill the LD IO fields,
 * construct the SGL and set transfer direction and sense buffer.
 * Returns 0 on success, 1 on any failure.
 */
int
mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd, struct mfi_cmd_tbolt *cmd)
{
	uint32_t device_id;
	uint32_t sge_count;
	uint8_t cdb[32], cdb_len;

	memset(cdb, 0, 32);
	struct mfi_mpi2_request_raid_scsi_io *io_request = cmd->io_request;

	/* NOTE(review): device_id is assigned but never used below. */
	device_id = mfi_cmd->cm_frame->header.target_id;

	/* Have to build CDB here for TB as BSD don't have a scsi layer */
	if ((cdb_len = mfi_tbolt_build_cdb(sc, mfi_cmd, cdb)) == 1)
		return 1;

	/* Just the CDB length; the remaining IoFlags bits stay zero. */
	io_request->IoFlags = cdb_len;
	memcpy(io_request->CDB.CDB32, cdb, 32);

	/* Only logical-drive READ/WRITE frames are supported here. */
	if (mfi_tbolt_is_ldio(mfi_cmd))
		mfi_tbolt_build_ldio(sc, mfi_cmd , cmd);
	else
		return 1;

	/*
	 * Construct SGL
	 */
	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
	    (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
		return 1;
	}
	io_request->RaidContext.numSGE = sge_count;
	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;

	/* Transfer direction from the MFI opcode: LD_WRITE writes. */
	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		io_request->Control = MPI2_SCSIIO_CONTROL_READ;

	io_request->SGLOffset0 = offsetof(
	    struct mfi_mpi2_request_raid_scsi_io, SGL)/4;

	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
	io_request->SenseBufferLength = MFI_SENSE_LEN;
	return 0;
}
893
894static int
895mfi_tbolt_build_cdb(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
896    uint8_t *cdb)
897{
898	uint32_t lba_lo, lba_hi, num_lba;
899	uint8_t cdb_len;
900
901	if (mfi_cmd == NULL || cdb == NULL)
902		return 1;
903	num_lba = mfi_cmd->cm_frame->io.header.data_len;
904	lba_lo = mfi_cmd->cm_frame->io.lba_lo;
905	lba_hi = mfi_cmd->cm_frame->io.lba_hi;
906
907	if (lba_hi == 0 && (num_lba <= 0xFF) && (lba_lo <= 0x1FFFFF)) {
908		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
909			/* Read 6 or Write 6 */
910			cdb[0] = (uint8_t) (0x0A);
911		else
912			cdb[0] = (uint8_t) (0x08);
913
914		cdb[4] = (uint8_t) num_lba;
915		cdb[3] = (uint8_t) (lba_lo & 0xFF);
916		cdb[2] = (uint8_t) (lba_lo >> 8);
917		cdb[1] = (uint8_t) ((lba_lo >> 16) & 0x1F);
918		cdb_len = 6;
919	}
920	else if (lba_hi == 0 && (num_lba <= 0xFFFF) && (lba_lo <= 0xFFFFFFFF)) {
921		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
922			/* Read 10 or Write 10 */
923			cdb[0] = (uint8_t) (0x2A);
924		else
925			cdb[0] = (uint8_t) (0x28);
926		cdb[8] = (uint8_t) (num_lba & 0xFF);
927		cdb[7] = (uint8_t) (num_lba >> 8);
928		cdb[5] = (uint8_t) (lba_lo & 0xFF);
929		cdb[4] = (uint8_t) (lba_lo >> 8);
930		cdb[3] = (uint8_t) (lba_lo >> 16);
931		cdb[2] = (uint8_t) (lba_lo >> 24);
932		cdb_len = 10;
933	}
934	else if ((num_lba > 0xFFFF) && (lba_hi == 0)) {
935		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
936			/* Read 12 or Write 12 */
937			cdb[0] = (uint8_t) (0xAA);
938		else
939			cdb[0] = (uint8_t) (0xA8);
940		cdb[9] = (uint8_t) (num_lba & 0xFF);
941		cdb[8] = (uint8_t) (num_lba >> 8);
942		cdb[7] = (uint8_t) (num_lba >> 16);
943		cdb[6] = (uint8_t) (num_lba >> 24);
944		cdb[5] = (uint8_t) (lba_lo & 0xFF);
945		cdb[4] = (uint8_t) (lba_lo >> 8);
946		cdb[3] = (uint8_t) (lba_lo >> 16);
947		cdb[2] = (uint8_t) (lba_lo >> 24);
948		cdb_len = 12;
949	} else {
950		if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
951			cdb[0] = (uint8_t) (0x8A);
952		else
953			cdb[0] = (uint8_t) (0x88);
954		cdb[13] = (uint8_t) (num_lba & 0xFF);
955		cdb[12] = (uint8_t) (num_lba >> 8);
956		cdb[11] = (uint8_t) (num_lba >> 16);
957		cdb[10] = (uint8_t) (num_lba >> 24);
958		cdb[9] = (uint8_t) (lba_lo & 0xFF);
959		cdb[8] = (uint8_t) (lba_lo >> 8);
960		cdb[7] = (uint8_t) (lba_lo >> 16);
961		cdb[6] = (uint8_t) (lba_lo >> 24);
962		cdb[5] = (uint8_t) (lba_hi & 0xFF);
963		cdb[4] = (uint8_t) (lba_hi >> 8);
964		cdb[3] = (uint8_t) (lba_hi >> 16);
965		cdb[2] = (uint8_t) (lba_hi >> 24);
966		cdb_len = 16;
967	}
968	return cdb_len;
969}
970
/*
 * Translate the OS-format scatter/gather list attached to mfi_cmd into the
 * MPT IEEE 64-bit SGE format the Thunderbolt firmware expects, writing the
 * entries at sgl_ptr (inside the MPT io_request frame).  If the list does
 * not fit in the main message, a chain element is emitted and the remainder
 * is written into the per-command chain frame (cmd->sg_frame).
 *
 * Returns the number of OS SGL entries consumed, or 0 when the command
 * carries no data transfer.  A count larger than sc->mfi_max_sge is
 * returned unprocessed as an error indication to the caller.
 */
static int
mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
		   pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
{
	uint8_t i, sg_processed, sg_to_process;
	uint8_t sge_count, sge_idx;
	union mfi_sgl *os_sgl;

	/*
	 * Return 0 if there is no data transfer
	 */
	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
	 	device_printf(sc->mfi_dev, "Buffer empty \n");
		return 0;
	}
	os_sgl = mfi_cmd->cm_sg;
	sge_count = mfi_cmd->cm_frame->header.sg_count;

	/* More entries than the adapter supports: hand the count back as an
	 * error without building anything. */
	if (sge_count > sc->mfi_max_sge) {
		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
		    os_sgl, sge_count);
		return sge_count;
	}

	if (sge_count > sc->max_SGEs_in_main_message)
		/* One element to store the chain info */
		sge_idx = sc->max_SGEs_in_main_message - 1;
	else
		sge_idx = sge_count;

	/* Copy the entries that fit directly into the main message. */
	for (i = 0; i < sge_idx; i++) {
		/*
		 * For 32bit BSD we are getting 32 bit SGL's from OS
		 * but FW only take 64 bit SGL's so copying from 32 bit
		 * SGL's to 64.
		 */
		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
		} else {
			sgl_ptr->Length = os_sgl->sg32[i].len;
			sgl_ptr->Address = os_sgl->sg32[i].addr;
		}
		sgl_ptr->Flags = 0;
		sgl_ptr++;
		/* No chain needed (yet); re-cleared each iteration. */
		cmd->io_request->ChainOffset = 0;
	}

	sg_processed = i;

	/* Entries remain: emit a chain element pointing at the command's
	 * dedicated chain frame and copy the rest there. */
	if (sg_processed < sge_count) {
		pMpi25IeeeSgeChain64_t sg_chain;
		/* NOTE(review): sg_to_process is computed but never used. */
		sg_to_process = sge_count - sg_processed;
		cmd->io_request->ChainOffset =
		    sc->chain_offset_value_for_main_message;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
		/* NOTE(review): chain Length is sized with
		 * sizeof(MPI2_SGE_IO_UNION) rather than the IEEE SGE size;
		 * confirm against the MPI 2.5 spec. */
		sg_chain->Length =  (sizeof(MPI2_SGE_IO_UNION) *
		    (sge_count - sg_processed));
		sg_chain->Address = cmd->sg_frame_phys_addr;
		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
		for (; i < sge_count; i++) {
			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
			} else {
				sgl_ptr->Length = os_sgl->sg32[i].len;
				sgl_ptr->Address = os_sgl->sg32[i].addr;
			}
			sgl_ptr->Flags = 0;
			sgl_ptr++;
		}
	}
	return sge_count;
}
1049
1050union mfi_mpi2_request_descriptor *
1051mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1052{
1053	struct mfi_cmd_tbolt *cmd;
1054	union mfi_mpi2_request_descriptor *req_desc = NULL;
1055	uint16_t index;
1056	cmd = mfi_tbolt_get_cmd(sc);
1057	if (!cmd)
1058		return NULL;
1059	mfi_cmd->cm_extra_frames = cmd->index;
1060	cmd->sync_cmd_idx = mfi_cmd->cm_index;
1061
1062	index = cmd->index;
1063	req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1064	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd))
1065		return NULL;
1066	req_desc->header.SMID = index;
1067	return req_desc;
1068}
1069
1070union mfi_mpi2_request_descriptor *
1071mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1072{
1073	union mfi_mpi2_request_descriptor *req_desc = NULL;
1074	uint16_t index;
1075	if (mfi_build_mpt_pass_thru(sc, cmd)) {
1076		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1077		    "cmd\n");
1078		return NULL;
1079	}
1080	/* For fusion the frame_count variable is used for SMID */
1081	index = cmd->cm_extra_frames;
1082
1083	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1084	if (!req_desc)
1085		return NULL;
1086
1087	bzero(req_desc, sizeof(req_desc));
1088	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1089	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1090	req_desc->header.SMID = index;
1091	return req_desc;
1092}
1093
1094int
1095mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1096{
1097	struct mfi_frame_header *hdr;
1098	uint8_t *cdb;
1099	union mfi_mpi2_request_descriptor *req_desc = NULL;
1100	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
1101
1102	hdr = &cm->cm_frame->header;
1103	cdb = cm->cm_frame->pass.cdb;
1104	if (sc->adpreset)
1105		return 1;
1106	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1107		cm->cm_timestamp = time_uptime;
1108		mfi_enqueue_busy(cm);
1109	}
1110	else {	/* still get interrupts for it */
1111		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1112		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1113	}
1114
1115	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1116		/* check for inquiry commands coming from CLI */
1117		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
1118			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1119			    NULL) {
1120				device_printf(sc->mfi_dev, "Mapping from MFI "
1121				    "to MPT Failed \n");
1122				return 1;
1123			}
1124		}
1125		else
1126			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
1127	}
1128	else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1129	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
1130		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1131			device_printf(sc->mfi_dev, "LDIO Failed \n");
1132			return 1;
1133		}
1134	} else
1135		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1136			device_printf(sc->mfi_dev, "Mapping from MFI to MPT "
1137			    "Failed\n");
1138			return 1;
1139		}
1140	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
1141	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1142
1143	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1144		return 0;
1145
1146	/* This is a polled command, so busy-wait for it to complete. */
1147	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1148		DELAY(1000);
1149		tm -= 1;
1150		if (tm <= 0)
1151		break;
1152	}
1153
1154	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1155		device_printf(sc->mfi_dev, "Frame %p timed out "
1156		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1157		return (ETIMEDOUT);
1158	}
1159	return 0;
1160}
1161
/*
 * After a firmware reset, walk the busy queue (oldest first, hence the
 * reverse traversal) and re-queue every command that was outstanding so it
 * gets re-issued to the re-initialized firmware.  If any single command has
 * already been retried through multiple resets, assume it is the trigger
 * and take the HBA offline instead.  Requires mfi_io_lock.
 */
static void mfi_issue_pending_cmds_again (struct mfi_softc *sc)
{
	struct mfi_command *cm, *tmp;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {

		cm->retry_for_fw_reset++;

		/*
		 * If a command has continuously been tried multiple times
		 * and causing a FW reset condition, no further recoveries
		 * should be performed on the controller
		 */
		if (cm->retry_for_fw_reset == 3) {
			device_printf(sc->mfi_dev, "megaraid_sas: command %d "
			    "was tried multiple times during adapter reset"
			    "Shutting down the HBA\n", cm->cm_index);
			mfi_kill_hba(sc);
			sc->hw_crit_error = 1;
			return;
		}

		if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0) {
			struct mfi_cmd_tbolt *cmd;
			/* Pull it off busy and release the Thunderbolt slot
			 * it was paired with (cm_extra_frames holds the
			 * 1-based SMID). */
			mfi_remove_busy(cm);
			cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames -
			    1 ];
			mfi_tbolt_return_cmd(sc, cmd);
			/* Only re-queue commands not already sitting on some
			 * other MFI queue; AEN waits are re-armed separately
			 * by the reset path rather than re-queued here. */
			if ((cm->cm_flags & MFI_ON_MFIQ_MASK) == 0) {
				if (cm->cm_frame->dcmd.opcode !=
				    MFI_DCMD_CTRL_EVENT_WAIT) {
					device_printf(sc->mfi_dev,
					    "APJ ****requeue command %d \n",
					    cm->cm_index);
					mfi_requeue_ready(cm);
				}
			}
			else
				mfi_release_command(cm);
		}
	}
	/* Kick the queues to start the re-issued I/O. */
	mfi_startio(sc);
}
1206
1207static void mfi_kill_hba (struct mfi_softc *sc)
1208{
1209	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1210		MFI_WRITE4 (sc, 0x00,MFI_STOP_ADP);
1211	else
1212		MFI_WRITE4 (sc, MFI_IDB,MFI_STOP_ADP);
1213}
1214
1215static void mfi_process_fw_state_chg_isr(void *arg)
1216{
1217	struct mfi_softc *sc= (struct mfi_softc *)arg;
1218	struct mfi_cmd_tbolt *cmd;
1219	int error, status;
1220
1221	if (sc->adpreset == 1) {
1222		device_printf(sc->mfi_dev, "First stage of FW reset "
1223		     "initiated...\n");
1224
1225		sc->mfi_adp_reset(sc);
1226		sc->mfi_enable_intr(sc);
1227
1228		device_printf(sc->mfi_dev, "First stage of reset complete, "
1229		    "second stage initiated...\n");
1230
1231		sc->adpreset = 2;
1232
1233		/* waiting for about 20 second before start the second init */
1234		for (int wait = 0; wait < 20000; wait++)
1235			DELAY(1000);
1236		device_printf(sc->mfi_dev, "Second stage of FW reset "
1237		     "initiated...\n");
1238		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1239
1240		sc->mfi_disable_intr(sc);
1241
1242		/* We expect the FW state to be READY */
1243		if (mfi_transition_firmware(sc)) {
1244			device_printf(sc->mfi_dev, "controller is not in "
1245			    "ready state\n");
1246			mfi_kill_hba(sc);
1247			sc->hw_crit_error= 1;
1248			return ;
1249		}
1250		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0)
1251				return;
1252
1253		mtx_lock(&sc->mfi_io_lock);
1254
1255		sc->mfi_enable_intr(sc);
1256		sc->adpreset = 0;
1257		free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1258		mfi_remove_busy(sc->mfi_aen_cm);
1259		cmd = sc->mfi_cmd_pool_tbolt[sc->mfi_aen_cm->cm_extra_frames
1260		    - 1];
1261		mfi_tbolt_return_cmd(sc, cmd);
1262		if (sc->mfi_aen_cm) {
1263			mfi_release_command(sc->mfi_aen_cm);
1264			sc->mfi_aen_cm = NULL;
1265		}
1266		if (sc->mfi_map_sync_cm) {
1267			mfi_release_command(sc->mfi_map_sync_cm);
1268			sc->mfi_map_sync_cm = NULL;
1269		}
1270		mfi_issue_pending_cmds_again(sc);
1271
1272		/*
1273		 * Issue pending command can result in adapter being marked
1274		 * dead because of too many re-tries. Check for that
1275		 * condition before clearing the reset condition on the FW
1276		 */
1277		if (!sc->hw_crit_error) {
1278			/*
1279			 * Initiate AEN (Asynchronous Event Notification)
1280			 */
1281			mfi_aen_setup(sc, sc->last_seq_num);
1282			sc->issuepend_done = 1;
1283			device_printf(sc->mfi_dev, "second stage of reset "
1284			    "complete, FW is ready now.\n");
1285		} else {
1286			device_printf(sc->mfi_dev, "second stage of reset "
1287			     "never completed, hba was marked offline.\n");
1288		}
1289	} else {
1290		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1291		    "called with unhandled value:%d\n", sc->adpreset);
1292	}
1293	mtx_unlock(&sc->mfi_io_lock);
1294}
1295
1296
1297/*
1298 * The ThunderBolt HW has an option for the driver to directly
1299 * access the underlying disks and operate on the RAID.  To
1300 * do this there needs to be a capability to keep the RAID controller
1301 * and driver in sync.  The FreeBSD driver does not take advantage
1302 * of this feature since it adds a lot of complexity and slows down
1303 * performance.  Performance is gained by using the controller's
1304 * cache etc.
1305 *
1306 * Even though this driver doesn't access the disks directly, an
1307 * AEN like command is used to inform the RAID firmware to "sync"
1308 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
1309 * command in write mode will return when the RAID firmware has
1310 * detected a change to the RAID state.  Examples of this type
1311 * of change are removing a disk.  Once the command returns then
1312 * the driver needs to acknowledge this and "sync" all LD's again.
1313 * This repeats until we shutdown.  Then we need to cancel this
1314 * pending command.
1315 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc.  Effectively,
 * this stops all RAID management functions.
1319 *
1320 * Doing another LD sync, requires the use of an event since the
1321 * driver needs to do a mfi_wait_command and can't do that in an
1322 * interrupt thread.
1323 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO
 * command.  That requires a bunch of structures and it is simpler to
 * just do the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
1327 */
1328
1329void
1330mfi_tbolt_sync_map_info(struct mfi_softc *sc)
1331{
1332	int error = 0, i;
1333	struct mfi_command *cmd;
1334	struct mfi_dcmd_frame *dcmd;
1335	uint32_t context = 0;
1336	union mfi_ld_ref *ld_sync;
1337	size_t ld_size;
1338	struct mfi_frame_header *hdr;
1339	struct mfi_command *cm = NULL;
1340	struct mfi_ld_list *list = NULL;
1341
1342	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
1343		return;
1344
1345	mtx_lock(&sc->mfi_io_lock);
1346	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1347	    (void **)&list, sizeof(*list));
1348	if (error)
1349		goto out;
1350
1351	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
1352	if (mfi_wait_command(sc, cm) != 0) {
1353		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1354		goto out;
1355	}
1356
1357	hdr = &cm->cm_frame->header;
1358	if (hdr->cmd_status != MFI_STAT_OK) {
1359		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1360			      hdr->cmd_status);
1361		goto out;
1362	}
1363
1364	ld_size = sizeof(*ld_sync) * list->ld_count;
1365	mtx_unlock(&sc->mfi_io_lock);
1366	ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
1367	     M_WAITOK | M_ZERO);
1368	for (i = 0; i < list->ld_count; i++) {
1369		ld_sync[i].ref = list->ld_list[i].ld.ref;
1370	}
1371
1372	mtx_lock(&sc->mfi_io_lock);
1373	if ((cmd = mfi_dequeue_free(sc)) == NULL)
1374		return;
1375	context = cmd->cm_frame->header.context;
1376	bzero(cmd->cm_frame, sizeof(union mfi_frame));
1377	cmd->cm_frame->header.context = context;
1378
1379	dcmd = &cmd->cm_frame->dcmd;
1380	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1381	dcmd->header.cmd = MFI_CMD_DCMD;
1382	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
1383	dcmd->header.timeout = 0;
1384	dcmd->header.data_len = ld_size;
1385	dcmd->header.scsi_status = 0;
1386	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
1387	cmd->cm_sg = &dcmd->sgl;
1388	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1389	cmd->cm_data = ld_sync;
1390	cmd->cm_private = ld_sync;
1391
1392	cmd->cm_len = ld_size;
1393	cmd->cm_complete = mfi_sync_map_complete;
1394	sc->mfi_map_sync_cm = cmd;
1395
1396	cmd->cm_flags = MFI_CMD_DATAOUT;
1397	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
1398	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
1399
1400	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
1401		device_printf(sc->mfi_dev, "failed to send map sync\n");
1402		return;
1403	}
1404
1405out:
1406	if (list)
1407		free(list, M_MFIBUF);
1408	if (cm)
1409		mfi_release_command(cm);
1410	mtx_unlock(&sc->mfi_io_lock);
1411
1412	return;
1413}
1414
1415
1416static void
1417mfi_sync_map_complete(struct mfi_command *cm)
1418{
1419	struct mfi_frame_header *hdr;
1420	struct mfi_softc *sc;
1421	int aborted = 0;
1422
1423	sc = cm->cm_sc;
1424	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1425
1426	hdr = &cm->cm_frame->header;
1427
1428	if (sc->mfi_map_sync_cm == NULL)
1429		return;
1430
1431	if (sc->cm_map_abort ||
1432	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1433		sc->cm_map_abort = 0;
1434		aborted = 1;
1435	}
1436
1437	free(cm->cm_data, M_MFIBUF);
1438	sc->mfi_map_sync_cm = NULL;
1439	wakeup(&sc->mfi_map_sync_cm);
1440	mfi_release_command(cm);
1441
1442	/* set it up again so the driver can catch more events */
1443	if (!aborted) {
1444		mfi_queue_map_sync(sc);
1445	}
1446}
1447
/*
 * Defer re-arming the LD map sync to a software-interrupt taskqueue;
 * mfi_tbolt_sync_map_info() waits on its command (mfi_wait_command) and so
 * cannot run directly from the completion context.  Requires mfi_io_lock.
 */
static void
mfi_queue_map_sync(struct mfi_softc *sc)
{
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
}
1454
/*
 * Taskqueue handler for mfi_map_sync_task: re-arm the LD map sync command.
 * The 'pending' count is unused.
 */
void
mfi_handle_map_sync(void *context, int pending)
{
	struct mfi_softc *softc;

	softc = context;
	mfi_tbolt_sync_map_info(softc);
}
1463