/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD$
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */

/*
 * Common Layer I/O functions.
 */

#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_sim.h>

/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	struct tw_cl_command_9k			*cmd;
	struct tw_cl_scsi_req_packet		*scsi_req;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	/*
	 * If working with a firmware version that does not support multiple
	 * LUNs, and this request is directed at a non-zero LUN, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

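	/*
	 * The command header is a fixed 128 bytes; the descriptor below
	 * advertises that size to the firmware (inferred from the constant,
	 * not spelled out in this file).
	 */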
	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

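	/*
	 * If a reset is in progress, or older requests are already waiting,
	 * queue this request behind them so ordering is preserved; the
	 * pending queue is drained when the command interrupt fires.
	 */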
	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) !=
		TW_CL_NULL) || (ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}

/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* For 9650SE-class controllers (9K_E/9K_SA), write the low 4 bytes first. */
	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
		tw_osl_write_reg(ctlr_handle,
				 TWA_COMMAND_QUEUE_OFFSET_LOW,
				 (TW_UINT32)(req->cmd_pkt_phys +
				 sizeof(struct tw_cl_command_header)), 4);
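	/*
	 * The matching high 4 bytes are written further below, after the
	 * queue-full check; on these controllers the command presumably is
	 * not posted until both halves have been written.
	 */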

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL) ||
			((req_pkt) &&
			(req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				/* Unmask command interrupt. */
				TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
					TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
			} else
				error = TW_OSL_EBUSY;
		} else {
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
			/* Now write the high 4 bytes */
			tw_osl_write_reg(ctlr_handle,
					 TWA_COMMAND_QUEUE_OFFSET_HIGH,
					 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys +
					 sizeof(struct tw_cl_command_header))) >> 32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/* First write the low 4 bytes, then the high 4. */
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_LOW,
						 (TW_UINT32)(req->cmd_pkt_phys +
						 sizeof(struct tw_cl_command_header)), 4);
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_HIGH,
						 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys +
						 sizeof(struct tw_cl_command_header))) >> 32), 4);
			} else
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET,
						 (TW_UINT32)(req->cmd_pkt_phys +
						 sizeof(struct tw_cl_command_header)), 4);
		}
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	return(error);
}

/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error = TW_CL_ERR_REQ_SUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_PASSTHRU;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

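	/*
	 * An EXECUTE_SCSI passthru uses the 9000-series command packet
	 * format; any other opcode is handled as a legacy 7000-series
	 * command packet.
	 */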
	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/* Build the cmd pkt. */
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
			== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
				sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
			else
				sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) !=
		TW_CL_NULL) || (ctlr->reset_in_progress)) {
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}

/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr_handle -- controller handle
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   need to be copied to the output buffer in
 *				   user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The AEN queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The AEN queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
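		/* The newest event is one slot behind the head, modulo the queue size. */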
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The AEN queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

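		/*
		 * Sequence ids increase by one per slot, so the slot holding
		 * the first event newer than event_buf.sequence_id is the
		 * oldest slot plus the sequence-id distance plus one, modulo
		 * the queue size.
		 */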
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (! (ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The AEN queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

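		/*
		 * Same index arithmetic as GET_NEXT_EVENT, but minus one:
		 * the slot holding the last event older than
		 * event_buf.sequence_id.
		 */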
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (! (ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;

	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

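		/*
		 * TW_TIME timestamps are in seconds (note the msec/1000 and
		 * *1000 conversions below), so lock timeouts are tracked
		 * with one-second granularity.
		 */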
		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}

	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;

	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
		comp_pkt.driver_branch_high =
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
		comp_pkt.driver_build_high =
			TWA_CURRENT_FW_BUILD(ctlr->arch_id);
		comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
		comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
		comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
		comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
		comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
		comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown opcode. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%lx", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}

/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		param_data	-- param value
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to read data into. */
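	/*
	 * (struct tw_cl_param_9k ends in a one-byte data placeholder,
	 * hence the "- 1" in the size computation below.)
	 */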
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

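	/*
	 * cmd->param.size is in 32-bit words: 2 for the fixed part of the
	 * command, plus 3 for a 64-bit SG descriptor or 2 for a 32-bit one.
	 */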
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif /* 0 */
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}

/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			data		-- ptr to the parameter value
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to send data using. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif /* 0 */
			goto out;
		}
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}

/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

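		/*
		 * The request is done once it leaves both the busy and
		 * pending states; anything other than COMPLETE at that
		 * point is a failure.
		 */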
		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state == TW_CLI_REQ_STATE_PENDING) {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against tw_cli_drain_pending_queue
		 * taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) ==
			TW_CL_NULL)
			TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
				TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}

/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle	-- controller handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	struct tw_cli_req_context	*req;
	TW_INT32			reset_attempt = 1;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->reset_in_progress = TW_CL_TRUE;
	twa_teardown_intr(sc);

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);
	ctlr->internal_req_busy = TW_CL_FALSE;
	ctlr->get_more_aens     = TW_CL_FALSE;

	/* Soft reset the controller. */
	while (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS) {
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller reset failed",
				"error = %d; attempt %d", error, reset_attempt);
			reset_attempt++;
			continue;
		}

		/* Re-establish logical connection with the controller. */
		if ((error = tw_cli_init_connection(ctlr,
				(TW_UINT16)(ctlr->max_simult_reqs),
				0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
				TW_CL_NULL, TW_CL_NULL))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Can't initialize connection after reset",
				"error = %d", error);
			reset_attempt++;
			continue;
		}

#ifdef TW_OSL_DEBUG
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
			"Controller reset done!", " ");
#endif /* TW_OSL_DEBUG */
		break;
	} /* End of while */

	/* Move commands from the reset queue to the pending queue. */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_RESET_Q)) !=
		TW_CL_NULL) {
		tw_osl_timeout(req->req_handle);
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
	}

	twa_setup_intr(sc);
	tw_cli_enable_interrupts(ctlr);
	if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) !=
		TW_CL_NULL)
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	ctlr->reset_in_progress = TW_CL_FALSE;
	ctlr->reset_needed = TW_CL_FALSE;

	/* Request for a bus re-scan. */
	tw_osl_scan_bus(ctlr_handle);

	return(error);
}

TW_VOID
tw_cl_set_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	ctlr->reset_needed = TW_CL_TRUE;
}

TW_INT32
tw_cl_is_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->reset_needed);
}

TW_INT32
tw_cl_is_active(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->active);
}

/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	int				found;
	int				loop_count;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->reset_phase1_in_progress = TW_CL_TRUE;
		loop_count = 0;
		error = 0x7888; /* arbitrary sentinel, returned if the handshake is missed */
		do {
			found = (tw_cli_find_response(ctlr,
				TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) ==
				TW_OSL_ESUCCESS);
			tw_osl_delay(10);
			loop_count++;
		} while (!found && (loop_count < 6000000)); /* 10 us x 6000000 = 60 seconds */

		if (!found) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Missed firmware handshake after soft-reset",
				"error = %d", error);
			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
			return(error);
		}

		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->reset_phase1_in_progress = TW_CL_FALSE;
	}

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}

/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a scsi cmd to fw.
 *
 * Input:		req	-- ptr to request pkt
 *			cmd	-- opcode of scsi cmd to send
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy)
		return(TW_OSL_EBUSY);
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

	cmd9k->cdb[0] = (TW_UINT8)cmd;	/* opcode */
	cmd9k->cdb[4] = 128;		/* allocation length */

	if ((error = tw_cli_submit_cmd(req)) &&
		(error != TW_OSL_EBUSY)) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle,
			tw_osl_cur_func(),
			"Failed to start SCSI command: "
			"request = %p, error = %d", req, error);
		return(TW_OSL_EIO);
	}
	return(TW_OSL_ESUCCESS);
}

/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command: "
			"request = %p, error = %d", req, error);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}

/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			sgl_src	-- ptr to fill the sg list from
 *			sgl_dest-- ptr to sg list
 *			num_sgl_entries -- # of sg list entries
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
			/*
			 * On controllers that report 64-bit SG lengths, each
			 * source descriptor carries an extra 4 bytes; skip
			 * them to reach the next descriptor.
			 */
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}