/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD$
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_sim.h>



/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	struct tw_cl_command_9k			*cmd;
	struct tw_cl_scsi_req_packet		*scsi_req;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

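	/*
	 * Note: in the 9000-series command packet, the LUN is split into two
	 * 4-bit halves -- the low nibble is packed with the request id and
	 * the high nibble with the SGL entry count (see the
	 * BUILD_LUN_L4__REQ_ID and BUILD_LUN_H4__SGL_ENTRIES macros below).
	 */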
	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

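	/*
	 * If other requests are already pending, or a controller reset is in
	 * progress, defer this request to the pending queue to preserve
	 * ordering; unmasking the command interrupt lets the interrupt path
	 * (tw_cli_submit_pending_queue) drain the pending queue once the
	 * controller can accept commands again.
	 */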
	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
		(ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* For 9650SE first write low 4 bytes */
	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
		tw_osl_write_reg(ctlr_handle,
				 TWA_COMMAND_QUEUE_OFFSET_LOW,
				 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

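	/*
	 * Note: on the 9650SE/9690SA the 64-bit command packet address is
	 * written as two 32-bit halves: the low half above, before the
	 * queue-full check, and the high half only after the request has
	 * been queued as busy (below), which appears to be what actually
	 * posts the command on these controllers.
	 */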
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
			|| ((req_pkt) &&
			(req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
			) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				/* Unmask command interrupt. */
				TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
					TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
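				/*
				 * The unmasked command interrupt fires once
				 * the controller again has room in its
				 * command queue; the pending queue is then
				 * drained (tw_cli_submit_pending_queue).
				 */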
			} else
				error = TW_OSL_EBUSY;
		} else {
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
			/* Now write the high 4 bytes */
			tw_osl_write_reg(ctlr_handle,
					 TWA_COMMAND_QUEUE_OFFSET_HIGH,
					 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/* First write the low 4 bytes, then the high 4. */
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_LOW,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_HIGH,
						 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
			} else
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
		}
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	return(error);
}



/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_PASSTHRU;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/* Build the cmd pkt. */
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
			== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
				sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
			else
				sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
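			/*
			 * Each SG descriptor occupies 3 32-bit words with
			 * 64-bit addressing (8-byte address + 4-byte length)
			 * and 2 words with 32-bit addressing, hence the
			 * 3 : 2 multiplier below.
			 */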
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
		(ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   need to be copied to the output buffer in
 *				   user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
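		/*
		 * The AEN queue is circular; the most recent event is the
		 * entry just before aen_head, computed modulo the queue size.
		 */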
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

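		/*
		 * Locate the first event newer than the sequence id passed in
		 * by the caller: index forward from the oldest queued event
		 * (start_index) by the difference in sequence ids, modulo the
		 * queue size.
		 */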
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (! (ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_AEN_NO_EVENTS.  If we
			 * also encountered an overflow condition above, we
			 * cannot report both conditions during this call.  We
			 * choose to report NO_EVENTS this time, and an
			 * overflow the next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

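		/*
		 * Same indexing scheme as GET_NEXT_EVENT, but stepping one
		 * entry back from the caller's sequence id instead of forward.
		 */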
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (! (ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_AEN_NO_EVENTS.  If we
			 * also encountered an overflow condition above, we
			 * cannot report both conditions during this call.  We
			 * choose to report NO_EVENTS this time, and an
			 * overflow the next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
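			/*
			 * The caller supplies the timeout in milliseconds;
			 * ioctl_lock.timeout is kept in the same units as
			 * tw_osl_get_local_time() (seconds, by the look of
			 * the msec / 1000 conversion used here).
			 */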
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}


	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;


	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
		comp_pkt.driver_branch_high =
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
		comp_pkt.driver_build_high =
			TWA_CURRENT_FW_BUILD(ctlr->arch_id);
		comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
		comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
		comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
		comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
		comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
		comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown opcode. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%x", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}



/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		param_data	-- param value
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to read data into. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

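	/*
	 * cmd->param.size is expressed in 32-bit words: 2 words of command
	 * header plus one SG descriptor -- 3 words (64-bit address + length)
	 * when 64-bit addressing is in use, 2 words otherwise.
	 */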
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if       0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif // 0
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}



/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to send data using. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if       0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif // 0
			goto out;
		}
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state == TW_CLI_REQ_STATE_PENDING) {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against tw_cli_drain_pending_queue
		 * taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
			TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
				TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}



/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	struct tw_cli_req_context	*req;
	TW_INT32			reset_attempt = 1;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->reset_in_progress = TW_CL_TRUE;
	twa_teardown_intr(sc);


	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);
	ctlr->internal_req_busy = TW_CL_FALSE;
	ctlr->get_more_aens     = TW_CL_FALSE;

	/* Soft reset the controller. */
	while (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS) {
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller reset failed",
				"error = %d; attempt %d", error, reset_attempt);
			reset_attempt++;
			continue;
		}

		/* Re-establish logical connection with the controller. */
		if ((error = tw_cli_init_connection(ctlr,
				(TW_UINT16)(ctlr->max_simult_reqs),
				0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
				TW_CL_NULL, TW_CL_NULL))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Can't initialize connection after reset",
				"error = %d", error);
			reset_attempt++;
			continue;
		}

#ifdef    TW_OSL_DEBUG
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
			"Controller reset done!", " ");
#endif /* TW_OSL_DEBUG */
		break;
	} /* End of while */

	/* Move commands from the reset queue to the pending queue. */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_RESET_Q)) != TW_CL_NULL) {
		tw_osl_timeout(req->req_handle);
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
	}

	twa_setup_intr(sc);
	tw_cli_enable_interrupts(ctlr);
	if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL)
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	ctlr->reset_in_progress = TW_CL_FALSE;
	ctlr->reset_needed = TW_CL_FALSE;

	/* Request for a bus re-scan. */
	tw_osl_scan_bus(ctlr_handle);

	return(error);
}

TW_VOID
tw_cl_set_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	ctlr->reset_needed = TW_CL_TRUE;
}

TW_INT32
tw_cl_is_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->reset_needed);
}

TW_INT32
tw_cl_is_active(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->active);
}



1162144966Svkashyap/*
1163144966Svkashyap * Function name:	tw_cli_soft_reset
1164144966Svkashyap * Description:		Does the actual soft reset.
1165144966Svkashyap *
1166144966Svkashyap * Input:		ctlr	-- ptr to per ctlr structure
1167144966Svkashyap * Output:		None
1168144966Svkashyap * Return value:	0	-- success
1169144966Svkashyap *			non-zero-- failure
1170144966Svkashyap */
1171144966SvkashyapTW_INT32
1172144966Svkashyaptw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
1173144966Svkashyap{
1174144966Svkashyap	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
1175208969Sdelphij	int				found;
1176208969Sdelphij	int				loop_count;
1177144966Svkashyap	TW_UINT32			error;
1178144966Svkashyap
1179144966Svkashyap	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");
1180144966Svkashyap
1181212008Sdelphij	tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1182144966Svkashyap		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1183144966Svkashyap		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
1184144966Svkashyap		"Resetting controller...",
1185144966Svkashyap		" ");
1186144966Svkashyap
1187144966Svkashyap	/* Don't let any new commands get submitted to the controller. */
1188144966Svkashyap	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
1189144966Svkashyap
1190144966Svkashyap	TW_CLI_SOFT_RESET(ctlr_handle);
1191144966Svkashyap
1192169400Sscottl	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
1193172496Sscottl	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
1194172496Sscottl	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
1195152213Svkashyap		/*
1196152213Svkashyap		 * There's a hardware bug in the G133 ASIC, which can lead to
1197152213Svkashyap		 * PCI parity errors and hangs, if the host accesses any
1198152213Svkashyap		 * registers when the firmware is resetting the hardware, as
1199152213Svkashyap		 * part of a hard/soft reset.  The window of time when the
1200152213Svkashyap		 * problem can occur is about 10 ms.  Here, we will handshake
1201152213Svkashyap		 * with the firmware to find out when the firmware is pulling
1202152213Svkashyap		 * down the hardware reset pin, and wait for about 500 ms to
1203152213Svkashyap		 * make sure we don't access any hardware registers (for
1204152213Svkashyap		 * polling) during that window.
1205152213Svkashyap		 */
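		/*
		 * Note on the polling loop below: each iteration delays for
		 * 10 microseconds and the loop gives up after 6,000,000
		 * iterations, i.e. roughly 60 seconds of waiting for the
		 * phase 1 notification.  0x7888 appears to be an arbitrary
		 * driver-internal error code, reported only if the handshake
		 * is never seen.
		 */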
1206208969Sdelphij		ctlr->reset_phase1_in_progress = TW_CL_TRUE;
1207208969Sdelphij		loop_count = 0;
1208208969Sdelphij		do {
1209208969Sdelphij			found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
1210152213Svkashyap			tw_osl_delay(10);
1211208969Sdelphij			loop_count++;
1212208969Sdelphij			error = 0x7888;
1213208969Sdelphij		} while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */
1214208969Sdelphij
1215208969Sdelphij		if (!found) {
1216212008Sdelphij			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1217208969Sdelphij				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1218208969Sdelphij				0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1219208969Sdelphij				"Missed firmware handshake after soft-reset",
1220208969Sdelphij				"error = %d", error);
1221208969Sdelphij			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1222208969Sdelphij			return(error);
1223208969Sdelphij		}
1224208969Sdelphij
1225152213Svkashyap		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
1226208969Sdelphij		ctlr->reset_phase1_in_progress = TW_CL_FALSE;
1227152213Svkashyap	}
1228152213Svkashyap
1229144966Svkashyap	if ((error = tw_cli_poll_status(ctlr,
1230144966Svkashyap			TWA_STATUS_MICROCONTROLLER_READY |
1231144966Svkashyap			TWA_STATUS_ATTENTION_INTERRUPT,
1232144966Svkashyap			TW_CLI_RESET_TIMEOUT_PERIOD))) {
1233212008Sdelphij		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1234144966Svkashyap			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1235144966Svkashyap			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1236144966Svkashyap			"Micro-ctlr not ready/No attn intr after reset",
1237144966Svkashyap			"error = %d", error);
1238144966Svkashyap		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1239144966Svkashyap		return(error);
1240144966Svkashyap	}
1241144966Svkashyap
1242144966Svkashyap	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
1243144966Svkashyap		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1244144966Svkashyap
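	/*
	 * The attention raised by the reset has been acknowledged above;
	 * discard any stale entries left in the controller's response queue
	 * before proceeding.
	 */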
1245144966Svkashyap	if ((error = tw_cli_drain_response_queue(ctlr))) {
1246144966Svkashyap		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1247144966Svkashyap			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1248144966Svkashyap			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1249144966Svkashyap			"Can't drain response queue after reset",
1250144966Svkashyap			"error = %d", error);
1251144966Svkashyap		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1252144966Svkashyap		return(error);
1253144966Svkashyap	}
1254144966Svkashyap
1255144966Svkashyap	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1256144966Svkashyap
1257144966Svkashyap	if ((error = tw_cli_drain_aen_queue(ctlr))) {
1258144966Svkashyap		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1259144966Svkashyap			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1260144966Svkashyap			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1261144966Svkashyap			"Can't drain AEN queue after reset",
1262144966Svkashyap			"error = %d", error);
1263144966Svkashyap		return(error);
1264144966Svkashyap	}
1265144966Svkashyap
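	/*
	 * The firmware posts an AEN announcing the soft reset; if it is not
	 * found among the drained AENs, the controller never acknowledged
	 * the reset.
	 */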
1266144966Svkashyap	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
1267212008Sdelphij		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1268144966Svkashyap			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1269144966Svkashyap			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1270144966Svkashyap			"Reset not reported by controller",
1271144966Svkashyap			"error = %d", error);
1272144966Svkashyap		return(error);
1273144966Svkashyap	}
1274212008Sdelphij
1275144966Svkashyap	return(TW_OSL_ESUCCESS);
1276144966Svkashyap}
1277144966Svkashyap
1278144966Svkashyap
1279144966Svkashyap
1280144966Svkashyap/*
1281144966Svkashyap * Function name:	tw_cli_send_scsi_cmd
1282144966Svkashyap * Description:		Sends down a scsi cmd to fw.
1283144966Svkashyap *
1284144966Svkashyap * Input:		req	-- ptr to request pkt
1285144966Svkashyap *			cmd	-- opcode of scsi cmd to send
1286144966Svkashyap * Output:		None
1287144966Svkashyap * Return value:	0	-- success
1288144966Svkashyap *			non-zero-- failure
1289144966Svkashyap */
1290144966SvkashyapTW_INT32
1291144966Svkashyaptw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
1292144966Svkashyap{
1293144966Svkashyap	struct tw_cl_command_packet	*cmdpkt;
1294144966Svkashyap	struct tw_cl_command_9k		*cmd9k;
1295144966Svkashyap	struct tw_cli_ctlr_context	*ctlr;
1296144966Svkashyap	TW_INT32			error;
1297144966Svkashyap
1298144966Svkashyap	ctlr = req->ctlr;
1299144966Svkashyap	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1300144966Svkashyap
1301144966Svkashyap	/* Make sure this is the only CL internal request at this time. */
1302208969Sdelphij	if (ctlr->internal_req_busy)
1303144966Svkashyap		return(TW_OSL_EBUSY);
1304208969Sdelphij	ctlr->internal_req_busy = TW_CL_TRUE;
1305144966Svkashyap	req->data = ctlr->internal_req_data;
1306144966Svkashyap	req->data_phys = ctlr->internal_req_data_phys;
1307144966Svkashyap	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
1308144966Svkashyap	req->length = TW_CLI_SECTOR_SIZE;
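	/*
	 * Internal requests share the controller's single pre-allocated,
	 * DMA-able scratch buffer (one sector long), which is why
	 * internal_req_busy is used above to serialize them.
	 */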
1309144966Svkashyap
1310144966Svkashyap	/* Build the cmd pkt. */
1311144966Svkashyap	cmdpkt = req->cmd_pkt;
1312144966Svkashyap
1313144966Svkashyap	cmdpkt->cmd_hdr.header_desc.size_header = 128;
1314144966Svkashyap
1315144966Svkashyap	cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1316144966Svkashyap
1317144966Svkashyap	cmd9k->res__opcode =
1318144966Svkashyap		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
1319144966Svkashyap	cmd9k->unit = 0;
1320144966Svkashyap	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
1321144966Svkashyap	cmd9k->status = 0;
1322144966Svkashyap	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
1323144966Svkashyap	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);
1324144966Svkashyap
1325144966Svkashyap	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1326144966Svkashyap		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
1327144966Svkashyap			TW_CL_SWAP64(req->data_phys);
1328144966Svkashyap		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
1329144966Svkashyap			TW_CL_SWAP32(req->length);
1330144966Svkashyap	} else {
1331144966Svkashyap		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
1332144966Svkashyap			TW_CL_SWAP32(req->data_phys);
1333144966Svkashyap		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
1334144966Svkashyap			TW_CL_SWAP32(req->length);
1335144966Svkashyap	}
1336144966Svkashyap
1337144966Svkashyap	cmd9k->cdb[0] = (TW_UINT8)cmd;
1338144966Svkashyap	cmd9k->cdb[4] = 128;
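	/*
	 * For the 6-byte CDBs issued through this path (e.g. REQUEST SENSE),
	 * byte 4 is the allocation length: the firmware is asked for at most
	 * 128 bytes of data even though a full sector is mapped.
	 */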
1339144966Svkashyap
1340144966Svkashyap	if ((error = tw_cli_submit_cmd(req)))
1341144966Svkashyap		if (error != TW_OSL_EBUSY) {
1342144966Svkashyap			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
1343144966Svkashyap				tw_osl_cur_func(),
1344144966Svkashyap				"Failed to start SCSI command",
1345144966Svkashyap				"request = %p, error = %d", req, error);
1346144966Svkashyap			return(TW_OSL_EIO);
1347144966Svkashyap		}
1348144966Svkashyap	return(TW_OSL_ESUCCESS);
1349144966Svkashyap}
1350144966Svkashyap
1351144966Svkashyap
1352144966Svkashyap
1353144966Svkashyap/*
1354144966Svkashyap * Function name:	tw_cli_get_aen
1355144966Svkashyap * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
1356144966Svkashyap *
1357144966Svkashyap * Input:		ctlr	-- ptr to per ctlr structure
1358144966Svkashyap * Output:		None
1359144966Svkashyap * Return value:	0	-- success
1360144966Svkashyap *			non-zero-- failure
1361144966Svkashyap */
1362144966SvkashyapTW_INT32
1363144966Svkashyaptw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
1364144966Svkashyap{
1365144966Svkashyap	struct tw_cli_req_context	*req;
1366144966Svkashyap	TW_INT32			error;
1367144966Svkashyap
1368144966Svkashyap	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1369144966Svkashyap
1370144966Svkashyap	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
1372144966Svkashyap		return(TW_OSL_EBUSY);
1373144966Svkashyap
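	/*
	 * Mark the request as a CL-internal, 9000-series-format command, and
	 * route its completion to tw_cli_aen_callback() rather than back to
	 * the OS layer.
	 */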
1374144966Svkashyap	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
1375144966Svkashyap	req->flags |= TW_CLI_REQ_FLAGS_9K;
1376144966Svkashyap	req->tw_cli_callback = tw_cli_aen_callback;
1377144966Svkashyap	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
1378144966Svkashyap		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
1379144966Svkashyap			"Could not send SCSI command",
1380144966Svkashyap			"request = %p, error = %d", req, error);
1381144966Svkashyap		if (req->data)
1382208969Sdelphij			ctlr->internal_req_busy = TW_CL_FALSE;
1383144966Svkashyap		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
1384144966Svkashyap	}
1385144966Svkashyap	return(error);
1386144966Svkashyap}
1387144966Svkashyap
1388144966Svkashyap
1389144966Svkashyap
1390144966Svkashyap/*
1391144966Svkashyap * Function name:	tw_cli_fill_sg_list
1392144966Svkashyap * Description:		Fills in the scatter/gather list.
1393144966Svkashyap *
1394144966Svkashyap * Input:		ctlr	-- ptr to per ctlr structure
1395144966Svkashyap *			sgl_src	-- ptr to fill the sg list from
1396144966Svkashyap *			sgl_dest-- ptr to sg list
1397144966Svkashyap *			nsegments--# of segments
1398144966Svkashyap * Output:		None
1399144966Svkashyap * Return value:	None
1400144966Svkashyap */
1401144966SvkashyapTW_VOID
1402144966Svkashyaptw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
1403144966Svkashyap	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
1404144966Svkashyap{
1405144966Svkashyap	TW_INT32	i;
1406144966Svkashyap
1407144966Svkashyap	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1408144966Svkashyap
1409144966Svkashyap	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1410144966Svkashyap		struct tw_cl_sg_desc64 *sgl_s =
1411144966Svkashyap			(struct tw_cl_sg_desc64 *)sgl_src;
1412144966Svkashyap		struct tw_cl_sg_desc64 *sgl_d =
1413144966Svkashyap			(struct tw_cl_sg_desc64 *)sgl_dest;
1414144966Svkashyap
1415144966Svkashyap		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1416144966Svkashyap			"64 bit addresses");
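		/*
		 * Note: when TW_CL_64BIT_SG_LENGTH is set, each source
		 * descriptor supplied by the OSL is presumably 4 bytes larger
		 * than struct tw_cl_sg_desc64 (a 64-bit length field); the
		 * extra advance of sgl_s below skips those bytes.
		 */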
1417144966Svkashyap		for (i = 0; i < num_sgl_entries; i++) {
1418144966Svkashyap			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
1419144966Svkashyap			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
1420144966Svkashyap			sgl_s++;
1421144966Svkashyap			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
1422144966Svkashyap				sgl_s = (struct tw_cl_sg_desc64 *)
1423144966Svkashyap					(((TW_INT8 *)(sgl_s)) + 4);
1424144966Svkashyap		}
1425144966Svkashyap	} else {
1426144966Svkashyap		struct tw_cl_sg_desc32 *sgl_s =
1427144966Svkashyap			(struct tw_cl_sg_desc32 *)sgl_src;
1428144966Svkashyap		struct tw_cl_sg_desc32 *sgl_d =
1429144966Svkashyap			(struct tw_cl_sg_desc32 *)sgl_dest;
1430144966Svkashyap
1431144966Svkashyap		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1432144966Svkashyap			"32 bit addresses");
1433144966Svkashyap		for (i = 0; i < num_sgl_entries; i++) {
1434144966Svkashyap			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
1435144966Svkashyap			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
1436144966Svkashyap		}
1437144966Svkashyap	}
1438144966Svkashyap}
1439144966Svkashyap
1440