1 /*-
2 * Redistribution and use in source and binary forms, with or without
3 * modification, are permitted provided that the following conditions
4 * are met:
5 *
6 *            Copyright 1994-2009 The FreeBSD Project.
7 *            All rights reserved.
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 *    THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FREEBSD PROJECT OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * The views and conclusions contained in the software and documentation
28 * are those of the authors and should not be interpreted as representing
29 * official policies, either expressed or implied, of the FreeBSD Project.
30 */
31
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/11/sys/dev/mfi/mfi_tbolt.c 331722 2018-03-29 02:50:57Z eadler $");
35
36#include "opt_mfi.h"
37
38#include <sys/param.h>
39#include <sys/types.h>
40#include <sys/kernel.h>
41#include <sys/selinfo.h>
42#include <sys/bus.h>
43#include <sys/conf.h>
44#include <sys/bio.h>
45#include <sys/ioccom.h>
46#include <sys/eventhandler.h>
47#include <sys/callout.h>
48#include <sys/uio.h>
49#include <machine/bus.h>
50#include <sys/sysctl.h>
51#include <sys/systm.h>
52#include <sys/malloc.h>
53
54#include <dev/mfi/mfireg.h>
55#include <dev/mfi/mfi_ioctl.h>
56#include <dev/mfi/mfivar.h>
57
58struct mfi_cmd_tbolt *mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *);
59union mfi_mpi2_request_descriptor *
60mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index);
61void mfi_tbolt_complete_cmd(struct mfi_softc *sc);
62int mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
63    struct mfi_cmd_tbolt *cmd);
64union mfi_mpi2_request_descriptor *mfi_tbolt_build_mpt_cmd(struct mfi_softc
65    *sc, struct mfi_command *cmd);
66uint8_t
67mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd);
68union mfi_mpi2_request_descriptor *mfi_build_and_issue_cmd(struct mfi_softc
69    *sc, struct mfi_command *mfi_cmd);
70void mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
71    struct mfi_cmd_tbolt *cmd);
72static int mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command
73    *mfi_cmd, pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd);
74void
75map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
76     uint8_t ext_status);
77static void mfi_issue_pending_cmds_again (struct mfi_softc *sc);
78static void mfi_kill_hba (struct mfi_softc *sc);
79static void mfi_process_fw_state_chg_isr(void *arg);
80static void mfi_sync_map_complete(struct mfi_command *);
81static void mfi_queue_map_sync(struct mfi_softc *sc);
82
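/* Bit in the outbound interrupt mask/status registers used for reply interrupts. */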
83#define MFI_FUSION_ENABLE_INTERRUPT_MASK	(0x00000008)
84
85
86extern int	mfi_polled_cmd_timeout;
87static int	mfi_fw_reset_test = 0;
88#ifdef MFI_DEBUG
89SYSCTL_INT(_hw_mfi, OID_AUTO, fw_reset_test, CTLFLAG_RWTUN, &mfi_fw_reset_test,
90           0, "Force a firmware reset condition");
91#endif
92
93void
94mfi_tbolt_enable_intr_ppc(struct mfi_softc *sc)
95{
96	MFI_WRITE4(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK);
97	MFI_READ4(sc, MFI_OMSK);
98}
99
100void
101mfi_tbolt_disable_intr_ppc(struct mfi_softc *sc)
102{
103	MFI_WRITE4(sc, MFI_OMSK, 0xFFFFFFFF);
104	MFI_READ4(sc, MFI_OMSK);
105}
106
107int32_t
108mfi_tbolt_read_fw_status_ppc(struct mfi_softc *sc)
109{
110	return MFI_READ4(sc, MFI_OSP0);
111}
112
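/*
 * mfi_tbolt_check_clear_intr_ppc - Check and acknowledge outbound interrupts
 * @sc: adapter soft state
 *
 * Returns 1 if the interrupt was not raised by this controller,
 * MFI_FIRMWARE_STATE_CHANGE if a firmware state-change interrupt is
 * pending, and 0 otherwise.
 */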
113int32_t
114mfi_tbolt_check_clear_intr_ppc(struct mfi_softc *sc)
115{
116	int32_t status, mfi_status = 0;
117
118	status = MFI_READ4(sc, MFI_OSTS);
119
120	if (status & 1) {
121		MFI_WRITE4(sc, MFI_OSTS, status);
122		MFI_READ4(sc, MFI_OSTS);
123		if (status & MFI_STATE_CHANGE_INTERRUPT) {
124			mfi_status |= MFI_FIRMWARE_STATE_CHANGE;
125		}
126
127		return mfi_status;
128	}
129	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
130		return 1;
131
132	MFI_READ4(sc, MFI_OSTS);
133	return 0;
134}
135
136
137void
138mfi_tbolt_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
139   uint32_t frame_cnt)
140{
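	/*
	 * Tag the frame address as an MFA-type request descriptor and post
	 * it through the inbound queue port registers.
	 */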
141	bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA
142	    << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
143	MFI_WRITE4(sc, MFI_IQPL, (uint32_t)bus_add);
144	MFI_WRITE4(sc, MFI_IQPH, (uint32_t)((uint64_t)bus_add >> 32));
145}
146
147/*
148 * mfi_tbolt_adp_reset - Reset the controller
149 * @sc: adapter soft state
150 */
151int
152mfi_tbolt_adp_reset(struct mfi_softc *sc)
153{
154	int retry = 0, i = 0;
155	int HostDiag;
156
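	/*
	 * Write the key sequence to the write sequence register to unlock
	 * the host diagnostic register before requesting the adapter reset.
	 */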
157	MFI_WRITE4(sc, MFI_WSR, 0xF);
158	MFI_WRITE4(sc, MFI_WSR, 4);
159	MFI_WRITE4(sc, MFI_WSR, 0xB);
160	MFI_WRITE4(sc, MFI_WSR, 2);
161	MFI_WRITE4(sc, MFI_WSR, 7);
162	MFI_WRITE4(sc, MFI_WSR, 0xD);
163
164	for (i = 0; i < 10000; i++) ;
165
166	HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
167
168	while (!( HostDiag & DIAG_WRITE_ENABLE)) {
169		for (i = 0; i < 1000; i++);
170		HostDiag = (uint32_t)MFI_READ4(sc, MFI_HDR);
171		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
172		    "hostdiag=%#x\n", retry, HostDiag);
173
174		if (retry++ >= 100)
175			return 1;
176	}
177
178	device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: HostDiag=%#x\n", HostDiag);
179
180	MFI_WRITE4(sc, MFI_HDR, (HostDiag | DIAG_RESET_ADAPTER));
181
182	for (i=0; i < 10; i++) {
183		for (i = 0; i < 10000; i++);
184	}
185
186	HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
187	while (HostDiag & DIAG_RESET_ADAPTER) {
188		for (i = 0; i < 1000; i++) ;
189		HostDiag = (uint32_t)MFI_READ4(sc, MFI_RSR);
190		device_printf(sc->mfi_dev, "ADP_RESET_TBOLT: retry time=%d, "
191		    "hostdiag=%#x\n", retry, HostDiag);
192
193		if (retry++ >= 1000)
194			return 1;
195	}
196	return 0;
197}
198
199/*
200 * This routine initializes Thunderbolt-specific device information
201 */
202void
203mfi_tbolt_init_globals(struct mfi_softc *sc)
204{
205	/* Initialize single reply size and Message size */
206	sc->reply_size = MEGASAS_THUNDERBOLT_REPLY_SIZE;
207	sc->raid_io_msg_size = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
208
209	/*
210	 * Calculate how many SGEs are allowed in an allocated main message:
211	 * (size of the Message - Raid SCSI IO message size (except SGE))
212	 * / size of SGE
213	 * (0x100 - (0x90 - 0x10)) / 0x10 = 8
214	 */
215	sc->max_SGEs_in_main_message =
216	    (uint8_t)((sc->raid_io_msg_size
217	    - (sizeof(struct mfi_mpi2_request_raid_scsi_io)
218	    - sizeof(MPI2_SGE_IO_UNION))) / sizeof(MPI2_SGE_IO_UNION));
219	/*
220	 * (Command frame size allocated in SRB ext - Raid SCSI IO message size)
221	 * / size of SGL;
222	 * (1280 - 256) / 16 = 64
223	 */
224	sc->max_SGEs_in_chain_message = (MR_COMMAND_SIZE
225	    - sc->raid_io_msg_size) / sizeof(MPI2_SGE_IO_UNION);
226	/*
227	 * (0x08 - 1) + 0x40 - 0x01 = 0x46; one SGE is reserved for command
228	 * coalescing
229	 */
230	sc->mfi_max_sge = (sc->max_SGEs_in_main_message - 1)
231	    + sc->max_SGEs_in_chain_message - 1;
232	/*
233	 * This is the offset, in units of 16 bytes (four 32-bit words), to
234	 * the next chain: (0x100 - 0x10) / 0x10 = 0xF (15)
235	 */
236	sc->chain_offset_value_for_main_message = (sc->raid_io_msg_size
237	    - sizeof(MPI2_SGE_IO_UNION))/16;
238	sc->chain_offset_value_for_mpt_ptmsg
239	    = offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL)/16;
240	sc->mfi_cmd_pool_tbolt = NULL;
241	sc->request_desc_pool = NULL;
242}
243
244/*
245 * This function calculates the memory requirement for the Thunderbolt
246 * controller and returns the total required memory in bytes
247 */
248
249uint32_t
250mfi_tbolt_get_memory_requirement(struct mfi_softc *sc)
251{
252	uint32_t size;
253	size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT;	/* for Alignment */
254	size += sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1);
255	size += sc->reply_size * sc->mfi_max_fw_cmds;
256	/* this is for SGL's */
257	size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->mfi_max_fw_cmds;
258	return size;
259}
260
261/*
262 * Description:
263 *      This function will prepare message pools for the Thunderbolt controller
264 * Arguments:
265 *      sc - adapter soft state
266 *      mem_location - start of the memory allocated for Thunderbolt
267 *      tbolt_contg_length - length of that contiguous allocation
268 * Return Value:
269 *      0; a warning is printed if the layout overruns the allocation
270 */
271int
272mfi_tbolt_init_desc_pool(struct mfi_softc *sc, uint8_t* mem_location,
273    uint32_t tbolt_contg_length)
274{
275	uint32_t     offset = 0;
276	uint8_t      *addr = mem_location;
277
278	/* Request Descriptor Base physical Address */
279
280	/* For Request Descriptors Virtual Memory */
281	/* Initialize the aligned IO Frames Virtual Memory Pointer */
282	if (((uintptr_t)addr) & (0xFF)) {
283		addr = &addr[sc->raid_io_msg_size];
284		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
285		sc->request_message_pool_align = addr;
286	} else
287		sc->request_message_pool_align = addr;
288
289	offset = sc->request_message_pool_align - sc->request_message_pool;
290	sc->request_msg_busaddr = sc->mfi_tb_busaddr + offset;
291
292	/* DJA XXX should this be bus dma ??? */
293	/* Skip request message pool */
294	addr = &addr[sc->raid_io_msg_size * (sc->mfi_max_fw_cmds + 1)];
295	/* Reply Frame Pool is initialized */
296	sc->reply_frame_pool = (struct mfi_mpi2_reply_header *) addr;
297	if (((uintptr_t)addr) & (0xFF)) {
298		addr = &addr[sc->reply_size];
299		addr = (uint8_t *)((uintptr_t)addr & (~0xFF));
300	}
301	sc->reply_frame_pool_align
302		    = (struct mfi_mpi2_reply_header *)addr;
303
304	offset = (uintptr_t)sc->reply_frame_pool_align
305	    - (uintptr_t)sc->request_message_pool;
306	sc->reply_frame_busaddr = sc->mfi_tb_busaddr + offset;
307
308	/* Skip Reply Frame Pool */
309	addr += sc->reply_size * sc->mfi_max_fw_cmds;
310	sc->reply_pool_limit = addr;
311
312	/* initializing reply address to 0xFFFFFFFF */
313	memset((uint8_t *)sc->reply_frame_pool, 0xFF,
314	       (sc->reply_size * sc->mfi_max_fw_cmds));
315
316	offset = sc->reply_size * sc->mfi_max_fw_cmds;
317	sc->sg_frame_busaddr = sc->reply_frame_busaddr + offset;
318	/* initialize the last_reply_idx to 0 */
319	sc->last_reply_idx = 0;
320	MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
321	MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
322	offset = (sc->sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME *
323	    sc->mfi_max_fw_cmds)) - sc->mfi_tb_busaddr;
324	if (offset > tbolt_contg_length)
325		device_printf(sc->mfi_dev, "Error: Initialized more than "
326		    "allocated\n");
327	return 0;
328}
329
330/*
331 * This routine prepares and issues the INIT2 frame to the firmware
332 */
333
334int
335mfi_tbolt_init_MFI_queue(struct mfi_softc *sc)
336{
337	struct MPI2_IOC_INIT_REQUEST   *mpi2IocInit;
338	struct mfi_init_frame		*mfi_init;
339	uintptr_t			offset = 0;
340	bus_addr_t			phyAddress;
341	MFI_ADDRESS			*mfiAddressTemp;
342	struct mfi_command		*cm, cmd_tmp;
343	int error;
344
345	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
346
347	/* Check if initialization is already completed */
348	if (sc->MFA_enabled) {
349		device_printf(sc->mfi_dev, "tbolt_init already initialised!\n");
350		return 1;
351	}
352
353	if ((cm = mfi_dequeue_free(sc)) == NULL) {
354		device_printf(sc->mfi_dev, "tbolt_init failed to get command "
355		    "entry!\n");
356		return (EBUSY);
357	}
358
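	/*
	 * Temporarily swap the command's frame and DMA map with the
	 * preallocated IOC INIT frame; the originals are restored at 'out'.
	 */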
359	cmd_tmp.cm_frame = cm->cm_frame;
360	cmd_tmp.cm_frame_busaddr = cm->cm_frame_busaddr;
361	cmd_tmp.cm_dmamap = cm->cm_dmamap;
362
363	cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_tb_init);
364	cm->cm_frame_busaddr = sc->mfi_tb_init_busaddr;
365	cm->cm_dmamap = sc->mfi_tb_init_dmamap;
366	cm->cm_frame->header.context = 0;
367
368	/*
369	 * Abuse the SG list area of the frame to hold the init_qinfo
370	 * object;
371	 */
372	mfi_init = &cm->cm_frame->init;
373
374	mpi2IocInit = (struct MPI2_IOC_INIT_REQUEST *)sc->mfi_tb_ioc_init_desc;
375	bzero(mpi2IocInit, sizeof(struct MPI2_IOC_INIT_REQUEST));
376	mpi2IocInit->Function  = MPI2_FUNCTION_IOC_INIT;
377	mpi2IocInit->WhoInit   = MPI2_WHOINIT_HOST_DRIVER;
378
379	/* set MsgVersion and HeaderVersion host driver was built with */
380	mpi2IocInit->MsgVersion = MPI2_VERSION;
381	mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION;
382	mpi2IocInit->SystemRequestFrameSize = sc->raid_io_msg_size/4;
383	mpi2IocInit->ReplyDescriptorPostQueueDepth
384	    = (uint16_t)sc->mfi_max_fw_cmds;
385	mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. */
386
387	/* Get physical address of reply frame pool */
388	offset = (uintptr_t) sc->reply_frame_pool_align
389	    - (uintptr_t)sc->request_message_pool;
390	phyAddress = sc->mfi_tb_busaddr + offset;
391	mfiAddressTemp =
392	    (MFI_ADDRESS *)&mpi2IocInit->ReplyDescriptorPostQueueAddress;
393	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
394	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
395
396	/* Get physical address of request message pool */
397	offset = sc->request_message_pool_align - sc->request_message_pool;
398	phyAddress =  sc->mfi_tb_busaddr + offset;
399	mfiAddressTemp = (MFI_ADDRESS *)&mpi2IocInit->SystemRequestFrameBaseAddress;
400	mfiAddressTemp->u.addressLow = (uint32_t)phyAddress;
401	mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32);
402	mpi2IocInit->ReplyFreeQueueAddress =  0; /* Not supported by MR. */
403	mpi2IocInit->TimeStamp = time_uptime;
404
405	if (sc->verbuf) {
406		snprintf((char *)sc->verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n",
407                MEGASAS_VERSION);
408		mfi_init->driver_ver_lo = (uint32_t)sc->verbuf_h_busaddr;
409		mfi_init->driver_ver_hi =
410		    (uint32_t)((uint64_t)sc->verbuf_h_busaddr >> 32);
411	}
412	/* Get the physical address of the mpi2 ioc init command */
413	phyAddress =  sc->mfi_tb_ioc_init_busaddr;
414	mfi_init->qinfo_new_addr_lo = (uint32_t)phyAddress;
415	mfi_init->qinfo_new_addr_hi = (uint32_t)((uint64_t)phyAddress >> 32);
416	mfi_init->header.flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
417
418	mfi_init->header.cmd = MFI_CMD_INIT;
419	mfi_init->header.data_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
420	mfi_init->header.cmd_status = MFI_STAT_INVALID_STATUS;
421
422	cm->cm_data = NULL;
423	cm->cm_flags |= MFI_CMD_POLLED;
424	cm->cm_timestamp = time_uptime;
425	if ((error = mfi_mapcmd(sc, cm)) != 0) {
426		device_printf(sc->mfi_dev, "failed to send IOC init2 "
427		    "command %d at %lx\n", error, (long)cm->cm_frame_busaddr);
428		goto out;
429	}
430
431	if (mfi_init->header.cmd_status == MFI_STAT_OK) {
432		sc->MFA_enabled = 1;
433	} else {
434		device_printf(sc->mfi_dev, "Init command Failed %#x\n",
435		    mfi_init->header.cmd_status);
436		error = mfi_init->header.cmd_status;
437		goto out;
438	}
439
440out:
441	cm->cm_frame = cmd_tmp.cm_frame;
442	cm->cm_frame_busaddr = cmd_tmp.cm_frame_busaddr;
443	cm->cm_dmamap = cmd_tmp.cm_dmamap;
444	mfi_release_command(cm);
445
446	return (error);
447
448}
449
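/*
 * mfi_tbolt_alloc_cmd - Allocate the Thunderbolt command pool
 * @sc: adapter soft state
 *
 * Allocates the request descriptor pool and one mfi_cmd_tbolt per firmware
 * command, pointing each command at its slice of the IO request frames and
 * SG chain frames laid out by mfi_tbolt_init_desc_pool().
 */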
450int
451mfi_tbolt_alloc_cmd(struct mfi_softc *sc)
452{
453	struct mfi_cmd_tbolt *cmd;
454	bus_addr_t io_req_base_phys;
455	uint8_t *io_req_base;
456	int i = 0, j = 0, offset = 0;
457
458	/*
459	 * sc->mfi_cmd_pool_tbolt is an array of struct mfi_cmd_tbolt pointers.
460	 * Allocate the dynamic array first and then allocate individual
461	 * commands.
462	 */
463	sc->request_desc_pool = malloc(sizeof(
464	    union mfi_mpi2_request_descriptor) * sc->mfi_max_fw_cmds,
465	    M_MFIBUF, M_NOWAIT|M_ZERO);
466
467	if (sc->request_desc_pool == NULL) {
468		device_printf(sc->mfi_dev, "Could not alloc "
469		    "memory for request_desc_pool\n");
470		return (ENOMEM);
471	}
472
473	sc->mfi_cmd_pool_tbolt = malloc(sizeof(struct mfi_cmd_tbolt*)
474	    * sc->mfi_max_fw_cmds, M_MFIBUF, M_NOWAIT|M_ZERO);
475
476	if (sc->mfi_cmd_pool_tbolt == NULL) {
477		free(sc->request_desc_pool, M_MFIBUF);
478		device_printf(sc->mfi_dev, "Could not alloc "
479		    "memory for cmd_pool_tbolt\n");
480		return (ENOMEM);
481	}
482
483	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
484		sc->mfi_cmd_pool_tbolt[i] = malloc(sizeof(
485		    struct mfi_cmd_tbolt),M_MFIBUF, M_NOWAIT|M_ZERO);
486
487		if (!sc->mfi_cmd_pool_tbolt[i]) {
488			device_printf(sc->mfi_dev, "Could not alloc "
489			    "cmd_pool_tbolt entry\n");
490
491			for (j = 0; j < i; j++)
492				free(sc->mfi_cmd_pool_tbolt[j], M_MFIBUF);
493
494			free(sc->request_desc_pool, M_MFIBUF);
495			sc->request_desc_pool = NULL;
496			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
497			sc->mfi_cmd_pool_tbolt = NULL;
498
499			return (ENOMEM);
500		}
501	}
502
503	/*
504	 * The first 256 bytes (SMID 0) are not used.  Don't add them to the
505	 * cmd list.
506	 */
507	io_req_base = sc->request_message_pool_align
508		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
509	io_req_base_phys = sc->request_msg_busaddr
510		+ MEGASAS_THUNDERBOLT_NEW_MSG_SIZE;
511
512	/*
513	 * Add all the commands to the command pool (sc->mfi_cmd_pool_tbolt)
514	 */
515	/* SMID 0 is reserved. Set SMID/index from 1 */
516
517	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
518		cmd = sc->mfi_cmd_pool_tbolt[i];
519		offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i;
520		cmd->index = i + 1;
521		cmd->request_desc = (union mfi_mpi2_request_descriptor *)
522		    (sc->request_desc_pool + i);
523		cmd->io_request = (struct mfi_mpi2_request_raid_scsi_io *)
524		    (io_req_base + offset);
525		cmd->io_request_phys_addr = io_req_base_phys + offset;
526		cmd->sg_frame = (MPI2_SGE_IO_UNION *)(sc->reply_pool_limit
527		    + i * MEGASAS_MAX_SZ_CHAIN_FRAME);
528		cmd->sg_frame_phys_addr = sc->sg_frame_busaddr + i
529		    * MEGASAS_MAX_SZ_CHAIN_FRAME;
530		cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
531
532		TAILQ_INSERT_TAIL(&(sc->mfi_cmd_tbolt_tqh), cmd, next);
533	}
534	return 0;
535}
536
537int
538mfi_tbolt_reset(struct mfi_softc *sc)
539{
540	uint32_t fw_state;
541
542	mtx_lock(&sc->mfi_io_lock);
543	if (sc->hw_crit_error) {
544		device_printf(sc->mfi_dev, "HW CRITICAL ERROR\n");
545		mtx_unlock(&sc->mfi_io_lock);
546		return 1;
547	}
548
549	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
550		fw_state = sc->mfi_read_fw_status(sc);
551		if ((fw_state & MFI_FWSTATE_FAULT) == MFI_FWSTATE_FAULT ||
552		    mfi_fw_reset_test) {
553			if ((sc->disableOnlineCtrlReset == 0)
554			    && (sc->adpreset == 0)) {
555				device_printf(sc->mfi_dev, "Adapter RESET "
556				    "condition is detected\n");
557				sc->adpreset = 1;
558				sc->issuepend_done = 0;
559				sc->MFA_enabled = 0;
560				sc->last_reply_idx = 0;
561				mfi_process_fw_state_chg_isr((void *) sc);
562			}
563			mtx_unlock(&sc->mfi_io_lock);
564			return 0;
565		}
566	}
567	mtx_unlock(&sc->mfi_io_lock);
568	return 1;
569}
570
571/*
572 * mfi_intr_tbolt - isr entry point
573 */
574void
575mfi_intr_tbolt(void *arg)
576{
577	struct mfi_softc *sc = (struct mfi_softc *)arg;
578
579	if (sc->mfi_check_clear_intr(sc) == 1) {
580		return;
581	}
582	if (sc->mfi_detaching)
583		return;
584	mtx_lock(&sc->mfi_io_lock);
585	mfi_tbolt_complete_cmd(sc);
586	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
587	mfi_startio(sc);
588	mtx_unlock(&sc->mfi_io_lock);
589	return;
590}
591
592/*
593 * map_tbolt_cmd_status -	Maps FW cmd status to OS cmd status
594 * @mfi_cmd :		Pointer to cmd
595 * @status :		status of cmd returned by FW
596 * @ext_status :	ext status of cmd returned by FW
597 */
598
599void
600map_tbolt_cmd_status(struct mfi_command *mfi_cmd, uint8_t status,
601    uint8_t ext_status)
602{
603	switch (status) {
604	case MFI_STAT_OK:
605		mfi_cmd->cm_frame->header.cmd_status = MFI_STAT_OK;
606		mfi_cmd->cm_frame->dcmd.header.cmd_status = MFI_STAT_OK;
607		mfi_cmd->cm_error = MFI_STAT_OK;
608		break;
609
610	case MFI_STAT_SCSI_IO_FAILED:
611	case MFI_STAT_LD_INIT_IN_PROGRESS:
612		mfi_cmd->cm_frame->header.cmd_status = status;
613		mfi_cmd->cm_frame->header.scsi_status = ext_status;
614		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
615		mfi_cmd->cm_frame->dcmd.header.scsi_status
616		    = ext_status;
617		break;
618
619	case MFI_STAT_SCSI_DONE_WITH_ERROR:
620		mfi_cmd->cm_frame->header.cmd_status = ext_status;
621		mfi_cmd->cm_frame->dcmd.header.cmd_status = ext_status;
622		break;
623
624	case MFI_STAT_LD_OFFLINE:
625	case MFI_STAT_DEVICE_NOT_FOUND:
626		mfi_cmd->cm_frame->header.cmd_status = status;
627		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
628		break;
629
630	default:
631		mfi_cmd->cm_frame->header.cmd_status = status;
632		mfi_cmd->cm_frame->dcmd.header.cmd_status = status;
633		break;
634	}
635}
636
637/*
638 * mfi_tbolt_return_cmd -	Return a cmd to the free command pool
639 * @sc:			Adapter soft state
640 * @tbolt_cmd:		Tbolt command packet to be returned to the free command pool
641 * @mfi_cmd:		Owning MFI command packet
642 */
643void
644mfi_tbolt_return_cmd(struct mfi_softc *sc, struct mfi_cmd_tbolt *tbolt_cmd,
645    struct mfi_command *mfi_cmd)
646{
647	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
648
649	mfi_cmd->cm_flags &= ~MFI_CMD_TBOLT;
650	mfi_cmd->cm_extra_frames = 0;
651	tbolt_cmd->sync_cmd_idx = sc->mfi_max_fw_cmds;
652
653	TAILQ_INSERT_TAIL(&sc->mfi_cmd_tbolt_tqh, tbolt_cmd, next);
654}
655
656void
657mfi_tbolt_complete_cmd(struct mfi_softc *sc)
658{
659	struct mfi_mpi2_reply_header *desc, *reply_desc;
660	struct mfi_command *cmd_mfi;	/* For MFA Cmds */
661	struct mfi_cmd_tbolt *cmd_tbolt;
662	uint16_t smid;
663	uint8_t reply_descript_type;
664	struct mfi_mpi2_request_raid_scsi_io  *scsi_io_req;
665	uint32_t status, extStatus;
666	uint16_t num_completed;
667	union desc_value val;
668	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
669
670	desc = (struct mfi_mpi2_reply_header *)
671		((uintptr_t)sc->reply_frame_pool_align
672		+ sc->last_reply_idx * sc->reply_size);
673	reply_desc = desc;
674
675	if (reply_desc == NULL) {
676		device_printf(sc->mfi_dev, "reply desc is NULL!!\n");
677		return;
678	}
679
680	reply_descript_type = reply_desc->ReplyFlags
681	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
682	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
683		return;
684
685	num_completed = 0;
686	val.word = ((union mfi_mpi2_reply_descriptor *)desc)->words;
687
688	/* Read Reply descriptor */
689	while ((val.u.low != 0xFFFFFFFF) && (val.u.high != 0xFFFFFFFF)) {
690		smid = reply_desc->SMID;
691		if (smid == 0 || smid > sc->mfi_max_fw_cmds) {
692			device_printf(sc->mfi_dev, "smid is %d cannot "
693			    "proceed - skipping\n", smid);
694			goto next;
695		}
696		cmd_tbolt = sc->mfi_cmd_pool_tbolt[smid - 1];
697		if (cmd_tbolt->sync_cmd_idx == sc->mfi_max_fw_cmds) {
698			device_printf(sc->mfi_dev, "cmd_tbolt %p "
699			    "has invalid sync_cmd_idx=%d - skipping\n",
700			    cmd_tbolt, cmd_tbolt->sync_cmd_idx);
701			goto next;
702		}
703		cmd_mfi = &sc->mfi_commands[cmd_tbolt->sync_cmd_idx];
704		scsi_io_req = cmd_tbolt->io_request;
705
706		status = cmd_mfi->cm_frame->dcmd.header.cmd_status;
707		extStatus = cmd_mfi->cm_frame->dcmd.header.scsi_status;
708		map_tbolt_cmd_status(cmd_mfi, status, extStatus);
709
710		/* mfi_tbolt_return_cmd is handled by mfi complete / return */
711		if ((cmd_mfi->cm_flags & MFI_CMD_SCSI) != 0 &&
712		    (cmd_mfi->cm_flags & MFI_CMD_POLLED) != 0) {
713			/* polled LD/SYSPD IO command */
714			/* XXX mark okay for now DJA */
715			cmd_mfi->cm_frame->header.cmd_status = MFI_STAT_OK;
716
717		} else {
718			/* remove command from busy queue if not polled */
719			if ((cmd_mfi->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
720				mfi_remove_busy(cmd_mfi);
721
722			/* complete the command */
723			mfi_complete(sc, cmd_mfi);
724		}
725
726next:
727		sc->last_reply_idx++;
728		if (sc->last_reply_idx >= sc->mfi_max_fw_cmds) {
729			MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
730			sc->last_reply_idx = 0;
731		}
732
733		/* Set it back to all 0xff */
734		((union mfi_mpi2_reply_descriptor*)desc)->words =
735			~((uint64_t)0x00);
736
737		num_completed++;
738
739		/* Get the next reply descriptor */
740		desc = (struct mfi_mpi2_reply_header *)
741		    ((uintptr_t)sc->reply_frame_pool_align
742		    + sc->last_reply_idx * sc->reply_size);
743		reply_desc = desc;
744		val.word = ((union mfi_mpi2_reply_descriptor*)desc)->words;
745		reply_descript_type = reply_desc->ReplyFlags
746		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
747		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
748			break;
749	}
750
751	if (!num_completed)
752		return;
753
754	/* update replyIndex to FW */
755	if (sc->last_reply_idx)
756		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
757
758	return;
759}
760
761/*
762 * mfi_tbolt_get_cmd -	Get a Thunderbolt command from the free pool
763 * @sc:			Adapter soft state
764 * @mfi_cmd:		Owning MFI command to bind the new command to
765 * Returns a free command from the pool, or NULL if none are available
766 */
767
768struct mfi_cmd_tbolt *
769mfi_tbolt_get_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
770{
771	struct mfi_cmd_tbolt *cmd = NULL;
772
773	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
774
775	if ((cmd = TAILQ_FIRST(&sc->mfi_cmd_tbolt_tqh)) == NULL)
776		return (NULL);
777	TAILQ_REMOVE(&sc->mfi_cmd_tbolt_tqh, cmd, next);
778	memset((uint8_t *)cmd->sg_frame, 0, MEGASAS_MAX_SZ_CHAIN_FRAME);
779	memset((uint8_t *)cmd->io_request, 0,
780	    MEGASAS_THUNDERBOLT_NEW_MSG_SIZE);
781
782	cmd->sync_cmd_idx = mfi_cmd->cm_index;
783	mfi_cmd->cm_extra_frames = cmd->index; /* Frame count used as SMID */
784	mfi_cmd->cm_flags |= MFI_CMD_TBOLT;
785
786	return cmd;
787}
788
789union mfi_mpi2_request_descriptor *
790mfi_tbolt_get_request_descriptor(struct mfi_softc *sc, uint16_t index)
791{
792	uint8_t *p;
793
794	if (index >= sc->mfi_max_fw_cmds) {
795		device_printf(sc->mfi_dev, "Invalid SMID (0x%x) request "
796		    "for descriptor\n", index);
797		return NULL;
798	}
799	p = sc->request_desc_pool + sizeof(union mfi_mpi2_request_descriptor)
800	    * index;
801	memset(p, 0, sizeof(union mfi_mpi2_request_descriptor));
802	return (union mfi_mpi2_request_descriptor *)p;
803}
804
805
806/* Used to build IOCTL cmd */
807uint8_t
808mfi_build_mpt_pass_thru(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
809{
810	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
811	struct mfi_mpi2_request_raid_scsi_io *io_req;
812	struct mfi_cmd_tbolt *cmd;
813
814	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
815	if (!cmd)
816		return EBUSY;
817	io_req = cmd->io_request;
818	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
819
820	io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
821	io_req->SGLOffset0 = offsetof(struct mfi_mpi2_request_raid_scsi_io,
822	    SGL) / 4;
823	io_req->ChainOffset = sc->chain_offset_value_for_mpt_ptmsg;
824
825	mpi25_ieee_chain->Address = mfi_cmd->cm_frame_busaddr;
826
827	/*
828	 * In MFI pass thru, nextChainOffset will always be zero to
829	 * indicate the end of the chain.
830	 */
831	mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
832		| MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
833
834	/* setting the length to the maximum length */
835	mpi25_ieee_chain->Length = 1024;
836
837	return 0;
838}
839
840void
841mfi_tbolt_build_ldio(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
842    struct mfi_cmd_tbolt *cmd)
843{
844	uint32_t start_lba_lo = 0, start_lba_hi = 0, device_id;
845	struct mfi_mpi2_request_raid_scsi_io	*io_request;
846	struct IO_REQUEST_INFO io_info;
847
848	device_id = mfi_cmd->cm_frame->io.header.target_id;
849	io_request = cmd->io_request;
850	io_request->RaidContext.TargetID = device_id;
851	io_request->RaidContext.Status = 0;
852	io_request->RaidContext.exStatus = 0;
853	io_request->RaidContext.regLockFlags = 0;
854
855	start_lba_lo = mfi_cmd->cm_frame->io.lba_lo;
856	start_lba_hi = mfi_cmd->cm_frame->io.lba_hi;
857
858	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
859	io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
860	io_info.numBlocks = mfi_cmd->cm_frame->io.header.data_len;
861	io_info.ldTgtId = device_id;
862	if ((mfi_cmd->cm_frame->header.flags & MFI_FRAME_DIR_READ) ==
863	    MFI_FRAME_DIR_READ)
864		io_info.isRead = 1;
865
866	io_request->RaidContext.timeoutValue
867		= MFI_FUSION_FP_DEFAULT_TIMEOUT;
868	io_request->Function = MPI2_FUNCTION_LD_IO_REQUEST;
869	io_request->DevHandle = device_id;
870	cmd->request_desc->header.RequestFlags
871		= (MFI_REQ_DESCRIPT_FLAGS_LD_IO
872		   << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
873	if ((io_request->IoFlags == 6) && (io_info.numBlocks == 0))
874		io_request->RaidContext.RegLockLength = 0x100;
875	io_request->DataLength = mfi_cmd->cm_frame->io.header.data_len
876	    * MFI_SECTOR_LEN;
877}
878
879int
880mfi_tbolt_build_io(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
881    struct mfi_cmd_tbolt *cmd)
882{
883	struct mfi_mpi2_request_raid_scsi_io *io_request;
884	uint32_t sge_count;
885	uint8_t cdb_len;
886	int readop;
887	u_int64_t lba;
888
889	io_request = cmd->io_request;
890	if (!(mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_READ
891	      || mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE))
892		return 1;
893
894	mfi_tbolt_build_ldio(sc, mfi_cmd, cmd);
895
896	/* Convert to SCSI command CDB */
897	bzero(io_request->CDB.CDB32, sizeof(io_request->CDB.CDB32));
898	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
899		readop = 0;
900	else
901		readop = 1;
902
903	lba =  mfi_cmd->cm_frame->io.lba_hi;
904	lba = (lba << 32) + mfi_cmd->cm_frame->io.lba_lo;
905	cdb_len = mfi_build_cdb(readop, 0, lba,
906	    mfi_cmd->cm_frame->io.header.data_len, io_request->CDB.CDB32);
907
908	/* Just the CDB length, rest of the Flags are zero */
909	io_request->IoFlags = cdb_len;
910
911	/*
912	 * Construct SGL
913	 */
914	sge_count = mfi_tbolt_make_sgl(sc, mfi_cmd,
915	    (pMpi25IeeeSgeChain64_t) &io_request->SGL, cmd);
916	if (sge_count > sc->mfi_max_sge) {
917		device_printf(sc->mfi_dev, "Error. sge_count (0x%x) exceeds "
918		    "max (0x%x) allowed\n", sge_count, sc->mfi_max_sge);
919		return 1;
920	}
921	io_request->RaidContext.numSGE = sge_count;
922	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
923
924	if (mfi_cmd->cm_frame->header.cmd == MFI_CMD_LD_WRITE)
925		io_request->Control = MPI2_SCSIIO_CONTROL_WRITE;
926	else
927		io_request->Control = MPI2_SCSIIO_CONTROL_READ;
928
929	io_request->SGLOffset0 = offsetof(
930	    struct mfi_mpi2_request_raid_scsi_io, SGL)/4;
931
932	io_request->SenseBufferLowAddress = mfi_cmd->cm_sense_busaddr;
933	io_request->SenseBufferLength = MFI_SENSE_LEN;
934	io_request->RaidContext.Status = MFI_STAT_INVALID_STATUS;
935	io_request->RaidContext.exStatus = MFI_STAT_INVALID_STATUS;
936
937	return 0;
938}
939
940
941static int
942mfi_tbolt_make_sgl(struct mfi_softc *sc, struct mfi_command *mfi_cmd,
943		   pMpi25IeeeSgeChain64_t sgl_ptr, struct mfi_cmd_tbolt *cmd)
944{
945	uint8_t i, sg_processed, sg_to_process;
946	uint8_t sge_count, sge_idx;
947	union mfi_sgl *os_sgl;
948	pMpi25IeeeSgeChain64_t sgl_end;
949
950	/*
951	 * Return 0 if there is no data transfer
952	 */
953	if (!mfi_cmd->cm_sg || !mfi_cmd->cm_len) {
954	 	device_printf(sc->mfi_dev, "Buffer empty \n");
955		return 0;
956	}
957	os_sgl = mfi_cmd->cm_sg;
958	sge_count = mfi_cmd->cm_frame->header.sg_count;
959
960	if (sge_count > sc->mfi_max_sge) {
961		device_printf(sc->mfi_dev, "sgl ptr %p sg_cnt %d \n",
962		    os_sgl, sge_count);
963		return sge_count;
964	}
965
966	if (sge_count > sc->max_SGEs_in_main_message)
967		/* One element to store the chain info */
968		sge_idx = sc->max_SGEs_in_main_message - 1;
969	else
970		sge_idx = sge_count;
971
972	if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)) {
973		sgl_end = sgl_ptr + (sc->max_SGEs_in_main_message - 1);
974		sgl_end->Flags = 0;
975	}
976
977	for (i = 0; i < sge_idx; i++) {
978		/*
979		 * On 32-bit BSD the OS hands us 32-bit SGLs, but the FW
980		 * only accepts 64-bit SGLs, so copy the 32-bit entries
981		 * into the 64-bit format.
982		 */
983		if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
984			sgl_ptr->Length = os_sgl->sg_skinny[i].len;
985			sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
986		} else {
987			sgl_ptr->Length = os_sgl->sg32[i].len;
988			sgl_ptr->Address = os_sgl->sg32[i].addr;
989		}
990		if (i == sge_count - 1 &&
991		    (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
992			sgl_ptr->Flags = MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
993		else
994			sgl_ptr->Flags = 0;
995		sgl_ptr++;
996		cmd->io_request->ChainOffset = 0;
997	}
998
999	sg_processed = i;
1000
1001	if (sg_processed < sge_count) {
1002		pMpi25IeeeSgeChain64_t sg_chain;
1003		sg_to_process = sge_count - sg_processed;
1004		cmd->io_request->ChainOffset =
1005		    sc->chain_offset_value_for_main_message;
1006		sg_chain = sgl_ptr;
1007		/* Prepare chain element */
1008		sg_chain->NextChainOffset = 0;
1009		if (sc->mfi_flags & (MFI_FLAGS_INVADER | MFI_FLAGS_FURY))
1010			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1011		else
1012			sg_chain->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1013			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
1014		sg_chain->Length =  (sizeof(MPI2_SGE_IO_UNION) *
1015		    (sge_count - sg_processed));
1016		sg_chain->Address = cmd->sg_frame_phys_addr;
1017		sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->sg_frame;
1018		for (; i < sge_count; i++) {
1019			if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
1020				sgl_ptr->Length = os_sgl->sg_skinny[i].len;
1021				sgl_ptr->Address = os_sgl->sg_skinny[i].addr;
1022			} else {
1023				sgl_ptr->Length = os_sgl->sg32[i].len;
1024				sgl_ptr->Address = os_sgl->sg32[i].addr;
1025			}
1026			if (i == sge_count - 1 &&
1027			    (sc->mfi_flags &
1028			    (MFI_FLAGS_INVADER | MFI_FLAGS_FURY)))
1029				sgl_ptr->Flags =
1030				    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1031			else
1032				sgl_ptr->Flags = 0;
1033			sgl_ptr++;
1034		}
1035	}
1036	return sge_count;
1037}
1038
1039union mfi_mpi2_request_descriptor *
1040mfi_build_and_issue_cmd(struct mfi_softc *sc, struct mfi_command *mfi_cmd)
1041{
1042	struct mfi_cmd_tbolt *cmd;
1043	union mfi_mpi2_request_descriptor *req_desc = NULL;
1044	uint16_t index;
1045	cmd = mfi_tbolt_get_cmd(sc, mfi_cmd);
1046	if (cmd == NULL)
1047		return (NULL);
1048
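	/* SMIDs are 1-based; the request descriptor pool is indexed from 0. */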
1049	index = cmd->index;
1050	req_desc = mfi_tbolt_get_request_descriptor(sc, index-1);
1051	if (req_desc == NULL) {
1052		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1053		return (NULL);
1054	}
1055
1056	if (mfi_tbolt_build_io(sc, mfi_cmd, cmd) != 0) {
1057		mfi_tbolt_return_cmd(sc, cmd, mfi_cmd);
1058		return (NULL);
1059	}
1060	req_desc->header.SMID = index;
1061	return req_desc;
1062}
1063
1064union mfi_mpi2_request_descriptor *
1065mfi_tbolt_build_mpt_cmd(struct mfi_softc *sc, struct mfi_command *cmd)
1066{
1067	union mfi_mpi2_request_descriptor *req_desc = NULL;
1068	uint16_t index;
1069	if (mfi_build_mpt_pass_thru(sc, cmd)) {
1070		device_printf(sc->mfi_dev, "Couldn't build MFI pass thru "
1071		    "cmd\n");
1072		return NULL;
1073	}
1074	/* For fusion the frame_count variable is used for SMID */
1075	index = cmd->cm_extra_frames;
1076
1077	req_desc = mfi_tbolt_get_request_descriptor(sc, index - 1);
1078	if (req_desc == NULL)
1079		return NULL;
1080
1081	bzero(req_desc, sizeof(*req_desc));
1082	req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1083	    MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1084	req_desc->header.SMID = index;
1085	return req_desc;
1086}
1087
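/*
 * mfi_tbolt_send_frame - Translate an MFI command into an MPT request and
 * post it to the controller.  Polled commands are busy-waited here and
 * ETIMEDOUT is returned if the firmware never updates the command status.
 */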
1088int
1089mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
1090{
1091	struct mfi_frame_header *hdr;
1092	uint8_t *cdb;
1093	union mfi_mpi2_request_descriptor *req_desc = NULL;
1094	int tm = mfi_polled_cmd_timeout * 1000;
1095
1096	hdr = &cm->cm_frame->header;
1097	cdb = cm->cm_frame->pass.cdb;
1098	if (sc->adpreset)
1099		return 1;
1100	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
1101		cm->cm_timestamp = time_uptime;
1102		mfi_enqueue_busy(cm);
1103	} else {	/* still get interrupts for it */
1104		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1105		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1106	}
1107
1108	if (hdr->cmd == MFI_CMD_PD_SCSI_IO) {
1109		/* check for inquiry commands coming from CLI */
1110		if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
1111			if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) ==
1112			    NULL) {
1113				device_printf(sc->mfi_dev, "Mapping from MFI "
1114				    "to MPT Failed \n");
1115				return 1;
1116			}
1117		}
1118		else
1119			device_printf(sc->mfi_dev, "DJA NA XXX SYSPDIO\n");
1120	} else if (hdr->cmd == MFI_CMD_LD_SCSI_IO ||
1121	    hdr->cmd == MFI_CMD_LD_READ || hdr->cmd == MFI_CMD_LD_WRITE) {
1122		cm->cm_flags |= MFI_CMD_SCSI;
1123		if ((req_desc = mfi_build_and_issue_cmd(sc, cm)) == NULL) {
1124			device_printf(sc->mfi_dev, "LDIO Failed \n");
1125			return 1;
1126		}
1127	} else if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
1128		device_printf(sc->mfi_dev, "Mapping from MFI to MPT Failed\n");
1129		return (1);
1130	}
1131
1132	if (cm->cm_flags & MFI_CMD_SCSI) {
1133		/*
1134		 * LD IO needs to be posted since it doesn't get
1135		 * acknowledged via a status update so have the
1136		 * controller reply via mfi_tbolt_complete_cmd.
1137		 */
1138		hdr->flags &= ~MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1139	}
1140
1141	MFI_WRITE4(sc, MFI_ILQP, (req_desc->words & 0xFFFFFFFF));
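	/* Post the 64-bit request descriptor through the inbound queue ports. */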
1142	MFI_WRITE4(sc, MFI_IHQP, (req_desc->words >>0x20));
1143
1144	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
1145		return 0;
1146
1147	/*
1148	 * This is a polled command, so busy-wait for it to complete.
1149	 *
1150	 * The value of hdr->cmd_status is updated directly by the hardware
1151	 * so there is no guarantee that mfi_tbolt_complete_cmd is called
1152	 * prior to this value changing.
1153	 */
1154	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1155		DELAY(1000);
1156		tm -= 1;
1157		if (tm <= 0)
1158			break;
1159		if (cm->cm_flags & MFI_CMD_SCSI) {
1160			/*
1161			 * Force check reply queue.
1162			 * This ensures that dump works correctly
1163			 */
1164			mfi_tbolt_complete_cmd(sc);
1165		}
1166	}
1167
1168	/* ensure the command cleanup has been processed before returning */
1169	mfi_tbolt_complete_cmd(sc);
1170
1171	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1172		device_printf(sc->mfi_dev, "Frame %p timed out "
1173		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
1174		return (ETIMEDOUT);
1175	}
1176	return 0;
1177}
1178
1179static void
1180mfi_issue_pending_cmds_again(struct mfi_softc *sc)
1181{
1182	struct mfi_command *cm, *tmp;
1183	struct mfi_cmd_tbolt *cmd;
1184
1185	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1186	TAILQ_FOREACH_REVERSE_SAFE(cm, &sc->mfi_busy, BUSYQ, cm_link, tmp) {
1187
1188		cm->retry_for_fw_reset++;
1189
1190		/*
1191		 * If a command has been retried multiple times and keeps
1192		 * causing a FW reset condition, no further recovery should
1193		 * be attempted on the controller.
1194		 */
1195		if (cm->retry_for_fw_reset == 3) {
1196			device_printf(sc->mfi_dev, "megaraid_sas: command %p "
1197			    "index=%d was tried multiple times during adapter "
1198			    "reset - Shutting down the HBA\n", cm, cm->cm_index);
1199			mfi_kill_hba(sc);
1200			sc->hw_crit_error = 1;
1201			return;
1202		}
1203
1204		mfi_remove_busy(cm);
1205		if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
1206			if (cm->cm_extra_frames != 0 && cm->cm_extra_frames <=
1207			    sc->mfi_max_fw_cmds) {
1208				cmd = sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1];
1209				mfi_tbolt_return_cmd(sc, cmd, cm);
1210			} else {
1211				device_printf(sc->mfi_dev,
1212				    "Invalid extra_frames: %d detected\n",
1213				    cm->cm_extra_frames);
1214			}
1215		}
1216
1217		if (cm->cm_frame->dcmd.opcode != MFI_DCMD_CTRL_EVENT_WAIT) {
1218			device_printf(sc->mfi_dev,
1219			    "APJ ****requeue command %p index=%d\n",
1220			    cm, cm->cm_index);
1221			mfi_requeue_ready(cm);
1222		} else
1223			mfi_release_command(cm);
1224	}
1225	mfi_startio(sc);
1226}
1227
1228static void
1229mfi_kill_hba(struct mfi_softc *sc)
1230{
1231	if (sc->mfi_flags & MFI_FLAGS_TBOLT)
1232		MFI_WRITE4(sc, 0x00, MFI_STOP_ADP);
1233	else
1234		MFI_WRITE4(sc, MFI_IDB, MFI_STOP_ADP);
1235}
1236
1237static void
1238mfi_process_fw_state_chg_isr(void *arg)
1239{
1240	struct mfi_softc *sc= (struct mfi_softc *)arg;
1241	int error, status;
1242
1243	if (sc->adpreset == 1) {
1244		device_printf(sc->mfi_dev, "First stage of FW reset "
1245		     "initiated...\n");
1246
1247		sc->mfi_adp_reset(sc);
1248		sc->mfi_enable_intr(sc);
1249
1250		device_printf(sc->mfi_dev, "First stage of reset complete, "
1251		    "second stage initiated...\n");
1252
1253		sc->adpreset = 2;
1254
1255		/* wait about 20 seconds before starting the second stage of init */
1256		for (int wait = 0; wait < 20000; wait++)
1257			DELAY(1000);
1258		device_printf(sc->mfi_dev, "Second stage of FW reset "
1259		     "initiated...\n");
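		/* Spin until the adapter-reset bit clears in the reset status register. */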
1260		while ((status = MFI_READ4(sc, MFI_RSR)) & 0x04);
1261
1262		sc->mfi_disable_intr(sc);
1263
1264		/* We expect the FW state to be READY */
1265		if (mfi_transition_firmware(sc)) {
1266			device_printf(sc->mfi_dev, "controller is not in "
1267			    "ready state\n");
1268			mfi_kill_hba(sc);
1269			sc->hw_crit_error = 1;
1270			return;
1271		}
1272		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
1273			device_printf(sc->mfi_dev, "Failed to initialise MFI "
1274			    "queue\n");
1275			mfi_kill_hba(sc);
1276			sc->hw_crit_error = 1;
1277			return;
1278		}
1279
1280		/* Init last reply index and max */
1281		MFI_WRITE4(sc, MFI_RFPI, sc->mfi_max_fw_cmds - 1);
1282		MFI_WRITE4(sc, MFI_RPI, sc->last_reply_idx);
1283
1284		sc->mfi_enable_intr(sc);
1285		sc->adpreset = 0;
1286		if (sc->mfi_aen_cm != NULL) {
1287			free(sc->mfi_aen_cm->cm_data, M_MFIBUF);
1288			mfi_remove_busy(sc->mfi_aen_cm);
1289			mfi_release_command(sc->mfi_aen_cm);
1290			sc->mfi_aen_cm = NULL;
1291		}
1292
1293		if (sc->mfi_map_sync_cm != NULL) {
1294			mfi_remove_busy(sc->mfi_map_sync_cm);
1295			mfi_release_command(sc->mfi_map_sync_cm);
1296			sc->mfi_map_sync_cm = NULL;
1297		}
1298		mfi_issue_pending_cmds_again(sc);
1299
1300		/*
1301		 * Issuing the pending commands can result in the adapter being
1302		 * marked dead because of too many retries.  Check for that
1303		 * condition before clearing the reset condition on the FW.
1304		 */
1305		if (!sc->hw_crit_error) {
1306			/*
1307			 * Initiate AEN (Asynchronous Event Notification) &
1308			 * Sync Map
1309			 */
1310			mfi_aen_setup(sc, sc->last_seq_num);
1311			mfi_tbolt_sync_map_info(sc);
1312
1313			sc->issuepend_done = 1;
1314			device_printf(sc->mfi_dev, "second stage of reset "
1315			    "complete, FW is ready now.\n");
1316		} else {
1317			device_printf(sc->mfi_dev, "second stage of reset "
1318			     "never completed, hba was marked offline.\n");
1319		}
1320	} else {
1321		device_printf(sc->mfi_dev, "mfi_process_fw_state_chg_isr "
1322		    "called with unhandled value:%d\n", sc->adpreset);
1323	}
1324}
1325
1326/*
1327 * The ThunderBolt HW has an option for the driver to directly
1328 * access the underlying disks and operate on the RAID.  To
1329 * do this there needs to be a capability to keep the RAID controller
1330 * and driver in sync.  The FreeBSD driver does not take advantage
1331 * of this feature since it adds a lot of complexity and slows down
1332 * performance.  Performance is gained by using the controller's
1333 * cache etc.
1334 *
1335 * Even though this driver doesn't access the disks directly, an
1336 * AEN like command is used to inform the RAID firmware to "sync"
1337 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
1338 * command in write mode will return when the RAID firmware has
1339 * detected a change to the RAID state.  Examples of this type
1340 * of change are removing a disk.  Once the command returns then
1341 * the driver needs to acknowledge this and "sync" all LD's again.
1342 * This repeats until we shutdown.  Then we need to cancel this
1343 * pending command.
1344 *
1345 * If this is not done right the RAID firmware will not remove a
1346 * pulled drive and the RAID won't go degraded, etc.  Effectively, this
1347 * stops any RAID management functions.
1348 *
1349 * Doing another LD sync requires the use of an event, since the
1350 * driver needs to do a mfi_wait_command and can't do that in an
1351 * interrupt thread.
1352 *
1353 * The driver could get the RAID state via MFI_DCMD_LD_MAP_GET_INFO.
1354 * That requires a bunch of structures, and it is simpler to just do
1355 * MFI_DCMD_LD_GET_LIST versus walking the RAID map.
1356 */
1357
1358void
1359mfi_tbolt_sync_map_info(struct mfi_softc *sc)
1360{
1361	int error = 0, i;
1362	struct mfi_command *cmd = NULL;
1363	struct mfi_dcmd_frame *dcmd = NULL;
1364	uint32_t context = 0;
1365	union mfi_ld_ref *ld_sync = NULL;
1366	size_t ld_size;
1367	struct mfi_frame_header *hdr;
1368	struct mfi_command *cm = NULL;
1369	struct mfi_ld_list *list = NULL;
1370
1371	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1372
1373	if (sc->mfi_map_sync_cm != NULL || sc->cm_map_abort)
1374		return;
1375
1376	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1377	    (void **)&list, sizeof(*list));
1378	if (error)
1379		goto out;
1380
1381	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAIN;
1382
1383	if (mfi_wait_command(sc, cm) != 0) {
1384		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1385		goto out;
1386	}
1387
1388	hdr = &cm->cm_frame->header;
1389	if (hdr->cmd_status != MFI_STAT_OK) {
1390		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1391			      hdr->cmd_status);
1392		goto out;
1393	}
1394
1395	ld_size = sizeof(*ld_sync) * list->ld_count;
1396	ld_sync = (union mfi_ld_ref *) malloc(ld_size, M_MFIBUF,
1397	     M_NOWAIT | M_ZERO);
1398	if (ld_sync == NULL) {
1399		device_printf(sc->mfi_dev, "Failed to allocate sync\n");
1400		goto out;
1401	}
1402	for (i = 0; i < list->ld_count; i++)
1403		ld_sync[i].ref = list->ld_list[i].ld.ref;
1404
1405	if ((cmd = mfi_dequeue_free(sc)) == NULL) {
1406		device_printf(sc->mfi_dev, "Failed to get command\n");
1407		free(ld_sync, M_MFIBUF);
1408		goto out;
1409	}
1410
1411	context = cmd->cm_frame->header.context;
1412	bzero(cmd->cm_frame, sizeof(union mfi_frame));
1413	cmd->cm_frame->header.context = context;
1414
1415	dcmd = &cmd->cm_frame->dcmd;
1416	bzero(dcmd->mbox, MFI_MBOX_SIZE);
1417	dcmd->header.cmd = MFI_CMD_DCMD;
1418	dcmd->header.flags = MFI_FRAME_DIR_WRITE;
1419	dcmd->header.timeout = 0;
1420	dcmd->header.data_len = ld_size;
1421	dcmd->header.scsi_status = 0;
1422	dcmd->opcode = MFI_DCMD_LD_MAP_GET_INFO;
1423	cmd->cm_sg = &dcmd->sgl;
1424	cmd->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1425	cmd->cm_data = ld_sync;
1426	cmd->cm_private = ld_sync;
1427
1428	cmd->cm_len = ld_size;
1429	cmd->cm_complete = mfi_sync_map_complete;
1430	sc->mfi_map_sync_cm = cmd;
1431
1432	cmd->cm_flags = MFI_CMD_DATAOUT;
1433	cmd->cm_frame->dcmd.mbox[0] = list->ld_count;
1434	cmd->cm_frame->dcmd.mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
1435
1436	if ((error = mfi_mapcmd(sc, cmd)) != 0) {
1437		device_printf(sc->mfi_dev, "failed to send map sync\n");
1438		free(ld_sync, M_MFIBUF);
1439		sc->mfi_map_sync_cm = NULL;
1440		mfi_release_command(cmd);
1441		goto out;
1442	}
1443
1444out:
1445	if (list)
1446		free(list, M_MFIBUF);
1447	if (cm)
1448		mfi_release_command(cm);
1449}
1450
1451static void
1452mfi_sync_map_complete(struct mfi_command *cm)
1453{
1454	struct mfi_frame_header *hdr;
1455	struct mfi_softc *sc;
1456	int aborted = 0;
1457
1458	sc = cm->cm_sc;
1459	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1460
1461	hdr = &cm->cm_frame->header;
1462
1463	if (sc->mfi_map_sync_cm == NULL)
1464		return;
1465
1466	if (sc->cm_map_abort ||
1467	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1468		sc->cm_map_abort = 0;
1469		aborted = 1;
1470	}
1471
1472	free(cm->cm_data, M_MFIBUF);
1473	wakeup(&sc->mfi_map_sync_cm);
1474	sc->mfi_map_sync_cm = NULL;
1475	mfi_release_command(cm);
1476
1477	/* set it up again so the driver can catch more events */
1478	if (!aborted)
1479		mfi_queue_map_sync(sc);
1480}
1481
1482static void
1483mfi_queue_map_sync(struct mfi_softc *sc)
1484{
1485	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1486	taskqueue_enqueue(taskqueue_swi, &sc->mfi_map_sync_task);
1487}
1488
1489void
1490mfi_handle_map_sync(void *context, int pending)
1491{
1492	struct mfi_softc *sc;
1493
1494	sc = context;
1495	mtx_lock(&sc->mfi_io_lock);
1496	mfi_tbolt_sync_map_info(sc);
1497	mtx_unlock(&sc->mfi_io_lock);
1498}
1499