tw_cl_io.c revision 169400
/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/twa/tw_cl_io.c 169400 2007-05-09 04:16:32Z scottl $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"


/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	struct tw_cl_command_9k			*cmd;
	struct tw_cl_scsi_req_packet		*scsi_req;
	TW_INT32				error;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"I/O during reset: returning busy. Ctlr state = 0x%x",
			ctlr->state);
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	/*
	 * If working with a firmware version that does not support multiple
	 * luns, and this request is directed at a non-zero lun, error it
	 * back right away.
	 */
	if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
		(ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
		req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
			TW_CL_ERR_REQ_SCSI_ERROR);
		req_pkt->tw_osl_callback(req_handle);
		return(TW_CL_ERR_REQ_SUCCESS);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;

	scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd->unit = (TW_UINT8)(scsi_req->unit);
	cmd->lun_l4__req_id = TW_CL_SWAP16(
		BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
	cmd->status = 0;
	cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

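	/*
	 * Two ways to build the SG list: if the OSL asked for a callback
	 * (TW_CL_REQ_CALLBACK_FOR_SGLIST), let it fill the command's SG
	 * list in place and report how many entries it used; otherwise,
	 * copy (and byte-swap, where needed) the OSL-built list into the
	 * command packet via tw_cli_fill_sg_list.
	 */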
	if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
		TW_UINT32	num_sgl_entries;

		req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
			&num_sgl_entries);
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				num_sgl_entries));
	} else {
		cmd->lun_h4__sgl_entries =
			TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
				scsi_req->sgl_entries));
		tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
			cmd->sg_list, scsi_req->sgl_entries);
	}

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Could not start request. request = %p, error = %d",
			req, error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req	-- ptr to CL internal request context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_INT32			error;
	TW_UINT8			notify_osl_of_ctlr_busy = TW_CL_FALSE;

	tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the controller cmd queue. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	/* For 9650SE first write low 4 bytes */
	if (ctlr->device_id == TW_CL_DEVICE_ID_9K_E)
		tw_osl_write_reg(ctlr_handle,
				 TWA_COMMAND_QUEUE_OFFSET_LOW,
				 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

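	/*
	 * Note: the 9650SE presumably latches the command only when the
	 * high 4 bytes are written (to TWA_COMMAND_QUEUE_OFFSET_HIGH,
	 * below), so writing the low half before the queue-full check is
	 * harmless even if we bail out without posting the command.
	 */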
	/* Check to see if we can post a command. */
	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
	if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
		goto out;

	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		struct tw_cl_req_packet	*req_pkt =
			(struct tw_cl_req_packet *)(req->orig_req);

		tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
			"Cmd queue full");

		if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
			|| ((req_pkt) &&
			(req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
			) {
			if (req->state != TW_CLI_REQ_STATE_PENDING) {
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"pending internal/ioctl request");
				req->state = TW_CLI_REQ_STATE_PENDING;
				tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
				error = 0;
			} else
				error = TW_OSL_EBUSY;
		} else {
			notify_osl_of_ctlr_busy = TW_CL_TRUE;
			error = TW_OSL_EBUSY;
		}
	} else {
		tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
			"Submitting command");

		/* Insert command into busy queue */
		req->state = TW_CLI_REQ_STATE_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

		if (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) {
			/* Now write the high 4 bytes */
			tw_osl_write_reg(ctlr_handle,
					 TWA_COMMAND_QUEUE_OFFSET_HIGH,
					 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
		} else {
			if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
				/* First write the low 4 bytes, then the high 4. */
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_LOW,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET_HIGH,
						 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
			} else
				tw_osl_write_reg(ctlr_handle,
						 TWA_COMMAND_QUEUE_OFFSET,
						 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
		}
	}
out:
	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
		if (notify_osl_of_ctlr_busy)
			tw_osl_ctlr_busy(ctlr_handle, req->req_handle);

		/*
		 * Synchronize access between writes to command and control
		 * registers in 64-bit environments, on G66.
		 */
		if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
			tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

		/* Unmask command interrupt. */
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);

		if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
	}

	return(error);
}



/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle	-- controller handle
 *			req_pkt		-- OSL built request packet
 *			req_handle	-- request handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
	struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
	struct tw_cli_ctlr_context		*ctlr;
	struct tw_cli_req_context		*req;
	union tw_cl_command_7k			*cmd_7k;
	struct tw_cl_command_9k			*cmd_9k;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_UINT8				opcode;
	TW_UINT8				sgl_offset;
	TW_VOID					*sgl = TW_CL_NULL;
	TW_INT32				error;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Passthru request during reset: returning busy. "
			"Ctlr state = 0x%x",
			ctlr->state);
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
		tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
			"Out of request context packets: returning busy");
		tw_osl_ctlr_busy(ctlr_handle, req_handle);
		return(TW_OSL_EBUSY);
	}

	req_handle->cl_req_ctxt = req;
	req->req_handle = req_handle;
	req->orig_req = req_pkt;
	req->tw_cli_callback = tw_cli_complete_io;

	req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);

	pt_req = &(req_pkt->gen_req_pkt.pt_req);

	tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
		pt_req->cmd_pkt_length);
	/* Build the cmd pkt. */
	if ((opcode = GET_OPCODE(((TW_UINT8 *)
		(pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
			== TWA_FW_CMD_EXECUTE_SCSI) {
		TW_UINT16	lun_l4, lun_h4;

		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 9k cmd pkt");
		req->flags |= TW_CLI_REQ_FLAGS_9K;
		cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
		lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
		lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
		cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
			BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
		if (pt_req->sgl_entries) {
			cmd_9k->lun_h4__sgl_entries =
				TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
					pt_req->sgl_entries));
			sgl = (TW_VOID *)(cmd_9k->sg_list);
		}
	} else {
		tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
			"passthru: 7k cmd pkt");
		cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
		cmd_7k->generic.request_id =
			(TW_UINT8)(TW_CL_SWAP16(req->request_id));
		if ((sgl_offset =
			GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
			sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
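			/*
			 * generic.size counts 32-bit words: each SG entry
			 * adds 3 words for a 64-bit descriptor (8-byte
			 * address + 4-byte length) or 2 words for a 32-bit
			 * descriptor.
			 */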
			cmd_7k->generic.size += pt_req->sgl_entries *
				((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
		}
	}

	if (sgl)
		tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
			sgl, pt_req->sgl_entries);

	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start passthru command",
			"error = %d", error);
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   need to be copied to the output buffer in
 *				   user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 cmd, TW_VOID *buf)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct tw_cl_ioctl_packet	*user_buf =
		(struct tw_cl_ioctl_packet *)buf;
	struct tw_cl_event_packet	event_buf;
	TW_INT32			event_index;
	TW_INT32			start_index;
	TW_INT32			error = TW_OSL_ESUCCESS;

	tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

	/* Serialize access to the AEN queue and the ioctl lock. */
	tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

	switch (cmd) {
	case TW_CL_IOCTL_GET_FIRST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get First Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
			event_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LAST_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Last Event");

		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			} else
				user_buf->driver_pkt.status = 0;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			user_buf->driver_pkt.status = 0;
		}
		event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
			ctlr->max_aens_supported;

		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_NEXT_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"Get Next Event: wrapped");
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				tw_cli_dbg_printf(2, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: overflow");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				tw_cli_dbg_printf(3, ctlr_handle,
					tw_osl_cur_func(),
					"Get Next Event: empty queue");
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

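		/*
		 * AEN sequence id's increase monotonically as events are
		 * queued, so the slot holding the first event newer than
		 * the caller's sequence_id is start_index offset by the
		 * sequence_id difference plus one, modulo the queue size.
		 */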
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id + 1) %
			ctlr->max_aens_supported;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Next Event: si = %x, ei = %x, ebsi = %x, "
			"sisi = %x, eisi = %x",
			start_index, event_index, event_buf.sequence_id,
			ctlr->aen_queue[start_index].sequence_id,
			ctlr->aen_queue[event_index].sequence_id);

		if (!(ctlr->aen_queue[event_index].sequence_id >
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get Previous Event");

		user_buf->driver_pkt.status = 0;
		if (ctlr->aen_q_wrapped) {
			if (ctlr->aen_q_overflow) {
				/*
				 * The aen queue has wrapped, even before some
				 * events have been retrieved.  Let the caller
				 * know that some AENs have been missed.
				 */
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_OVERFLOW;
				ctlr->aen_q_overflow = TW_CL_FALSE;
			}
			start_index = ctlr->aen_head;
		} else {
			if (ctlr->aen_head == ctlr->aen_tail) {
				user_buf->driver_pkt.status =
					TW_CL_ERROR_AEN_NO_EVENTS;
				break;
			}
			start_index = ctlr->aen_tail;	/* = 0 */
		}
		tw_osl_memcpy(&event_buf, user_buf->data_buf,
			sizeof(struct tw_cl_event_packet));

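		/*
		 * Mirror image of GET_NEXT_EVENT: step one entry back from
		 * the caller's sequence_id instead of one forward.
		 */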
		event_index = (start_index + event_buf.sequence_id -
			ctlr->aen_queue[start_index].sequence_id - 1) %
			ctlr->max_aens_supported;

		if (!(ctlr->aen_queue[event_index].sequence_id <
			event_buf.sequence_id)) {
			/*
			 * We don't have any event matching the criterion.  So,
			 * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
			 * encountered an overflow condition above, we cannot
			 * report both conditions during this call.  We choose
			 * to report NO_EVENTS this time, and an overflow the
			 * next time we are called.
			 */
			if (user_buf->driver_pkt.status ==
				TW_CL_ERROR_AEN_OVERFLOW) {
				/*
				 * Make a note so we report the overflow
				 * next time.
				 */
				ctlr->aen_q_overflow = TW_CL_TRUE;
			}
			user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
			break;
		}
		/* Copy the event -- even if there has been an overflow. */
		tw_osl_memcpy(user_buf->data_buf,
			&(ctlr->aen_queue[event_index]),
			sizeof(struct tw_cl_event_packet));

		ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

		break;


	case TW_CL_IOCTL_GET_LOCK:
	{
		struct tw_cl_lock_packet	lock_pkt;
		TW_TIME				cur_time;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get ioctl lock");

		cur_time = tw_osl_get_local_time();
		tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
			sizeof(struct tw_cl_lock_packet));

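		/*
		 * Note: ioctl_lock.timeout is kept in seconds (TW_TIME),
		 * while the lock packet exchanges milliseconds -- hence
		 * the /1000 and *1000 conversions below.
		 */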
		if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
			(lock_pkt.force_flag) ||
			(cur_time >= ctlr->ioctl_lock.timeout)) {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Getting lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
			ctlr->ioctl_lock.timeout =
				cur_time + (lock_pkt.timeout_msec / 1000);
			lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
			user_buf->driver_pkt.status = 0;
		} else {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"GET_LOCK: Lock already held!");
			lock_pkt.time_remaining_msec = (TW_UINT32)(
				(ctlr->ioctl_lock.timeout - cur_time) * 1000);
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
		}
		tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
			sizeof(struct tw_cl_lock_packet));
		break;
	}


	case TW_CL_IOCTL_RELEASE_LOCK:
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Release ioctl lock");

		if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
			tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
				"twa_ioctl: RELEASE_LOCK: Lock not held!");
			user_buf->driver_pkt.status =
				TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
		} else {
			tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
				"RELEASE_LOCK: Releasing lock!");
			ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
			user_buf->driver_pkt.status = 0;
		}
		break;


	case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
	{
		struct tw_cl_compatibility_packet	comp_pkt;

		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Get compatibility info");

		tw_osl_memcpy(comp_pkt.driver_version,
			TW_OSL_DRIVER_VERSION_STRING,
			sizeof(TW_OSL_DRIVER_VERSION_STRING));
		comp_pkt.working_srl = ctlr->working_srl;
		comp_pkt.working_branch = ctlr->working_branch;
		comp_pkt.working_build = ctlr->working_build;
		comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
		comp_pkt.driver_branch_high =
			TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
		comp_pkt.driver_build_high =
			TWA_CURRENT_FW_BUILD(ctlr->arch_id);
		comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
		comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
		comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
		comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
		comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
		comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
		user_buf->driver_pkt.status = 0;

		/* Copy compatibility information to user space. */
		tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
			(sizeof(struct tw_cl_compatibility_packet) <
			user_buf->driver_pkt.buffer_length) ?
			sizeof(struct tw_cl_compatibility_packet) :
			user_buf->driver_pkt.buffer_length);
		break;
	}

	default:
		/* Unknown opcode. */
		tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
			"Unknown ioctl cmd 0x%x", cmd);
		error = TW_OSL_ENOTTY;
	}

	tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
	return(error);
}



/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		param_data	-- param value
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to read data into. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
	cmd->param.request_id =
		(TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

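	/*
	 * cmd->param.size counts 32-bit words: 2 for the fixed part of
	 * the param command, plus 3 words (64-bit) or 2 words (32-bit)
	 * for the single SG descriptor.
	 */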
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we need. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no callback; wait until the command completes. */
		error = tw_cli_submit_and_poll_request(req,
				TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		tw_osl_memcpy(param_data, param->data, param_size);
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a callback.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(0);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"get_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(1);
}



/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr		-- ptr to per ctlr structure
 *			table_id	-- parameter table #
 *			param_id	-- index of the parameter in the table
 *			param_size	-- size of the parameter in bytes
 *			data		-- ptr to the parameter value to set
 *			callback	-- ptr to function, if any, to be called
 *					back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize the memory used to send the data. */
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

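	/*
	 * As in tw_cli_get_param, param.size below counts 32-bit words:
	 * 2 fixed, plus 3 (64-bit SG desc) or 2 (32-bit SG desc).
	 */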
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no callback; wait until the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error == TW_OSL_ETIMEDOUT)
			/* Clean-up done by tw_cli_submit_and_poll_request. */
			return(error);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
			goto out;
		}
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a callback.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.
			 * The caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

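		/*
		 * The request has left both the busy and pending states;
		 * report success (0) only if it reached COMPLETE.
		 */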
		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * We will reset the controller only if the request has already been
	 * submitted, so as to not lose the request packet.  If a busy request
	 * timed out, the reset will take care of freeing resources.  If a
	 * pending request timed out, we will free resources for that request,
	 * right here, thereby avoiding a reset.  So, the caller is expected
	 * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state != TW_CLI_REQ_STATE_PENDING)
		tw_cl_reset_ctlr(ctlr->ctlr_handle);
	else {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request ourselves
		 * from the pending queue (as against tw_cli_drain_pending_queue
		 * taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}



/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	TW_INT32			reset_attempt = 1;
	TW_INT32			error;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
		"Draining all queues following reset");
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);

	tw_cli_disable_interrupts(ctlr);

	/* Soft reset the controller. */
try_reset:
	if ((error = tw_cli_soft_reset(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller reset failed",
			"error = %d; attempt %d", error, reset_attempt++);
		if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
			goto try_reset;
		else
			goto out;
	}

	/* Re-establish logical connection with the controller. */
	if ((error = tw_cli_init_connection(ctlr,
			(TW_UINT16)(ctlr->max_simult_reqs),
			0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
			TW_CL_NULL, TW_CL_NULL))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't initialize connection after reset",
			"error = %d", error);
		goto out;
	}

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Controller reset done!",
		" ");

out:
	ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
	/*
	 * Enable interrupts, and also clear attention and response interrupts.
	 */
	tw_cli_enable_interrupts(ctlr);

	/* Request for a bus re-scan. */
	if (!error)
		tw_osl_scan_bus(ctlr_handle);
	return(error);
}



/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	TW_UINT32			status_reg;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
	    (ctlr->device_id == TW_CL_DEVICE_ID_9K_E)) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
		while (tw_cli_find_response(ctlr,
			TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS)
			tw_osl_delay(10);
		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
	}

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);

	if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
			(error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
		tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Controller errors detected after reset",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}



/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a scsi cmd to fw.
 *
 * Input:		req	-- ptr to request pkt
 *			cmd	-- opcode of scsi cmd to send
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
		return(TW_OSL_EBUSY);
	ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

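	/*
	 * For the 6-byte CDBs sent down this path (e.g. REQUEST SENSE
	 * from tw_cli_get_aen), byte 4 of the CDB is the allocation
	 * length: 128 bytes here.
	 */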
	cmd9k->cdb[0] = (TW_UINT8)cmd;
	cmd9k->cdb[4] = 128;

	if ((error = tw_cli_submit_cmd(req)))
		if (error != TW_OSL_EBUSY) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Failed to start SCSI command: "
				"request = %p, error = %d", req, error);
			return(TW_OSL_EIO);
		}
	return(TW_OSL_ESUCCESS);
}



/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command: "
			"request = %p, error = %d", req, error);
		if (req->data)
			ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}



/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			sgl_src	-- ptr to fill the sg list from
 *			sgl_dest-- ptr to sg list
 *			num_sgl_entries -- # of SG list entries
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
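			/*
			 * If the OSL's source descriptors carry 64-bit
			 * lengths (TW_CL_64BIT_SG_LENGTH), only the low 32
			 * bits were copied above; skip the remaining 4
			 * bytes to stay aligned with the next entry.
			 */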
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}
