/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/twa/tw_cl_io.c 208969 2010-06-09 21:40:38Z delphij $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_sim.h>


/*
 * Function name:	tw_cl_start_io
 * Description:		Interface to OS Layer for accepting SCSI requests.
 *
 * Input:		ctlr_handle -- controller handle
 *			req_pkt -- OSL built request packet
 *			req_handle -- request handle
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
    struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
        struct tw_cli_ctlr_context *ctlr;
        struct tw_cli_req_context *req;
        struct tw_cl_command_9k *cmd;
        struct tw_cl_scsi_req_packet *scsi_req;
        TW_INT32 error;

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

        if (ctlr->reset_in_progress) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                    "I/O during reset: returning busy.");
                return(TW_OSL_EBUSY);
        }

        /*
         * If working with a firmware version that does not support multiple
         * luns, and this request is directed at a non-zero lun, error it
         * back right away.
         */
        if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
            (ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
                req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
                    TW_CL_ERR_REQ_SCSI_ERROR);
                req_pkt->tw_osl_callback(req_handle);
                return(TW_CL_ERR_REQ_SUCCESS);
        }

        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                    "Out of request context packets: returning busy");
                return(TW_OSL_EBUSY);
        }

        req_handle->cl_req_ctxt = req;
        req->req_handle = req_handle;
        req->orig_req = req_pkt;
        req->tw_cli_callback = tw_cli_complete_io;

        req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
        req->flags |= TW_CLI_REQ_FLAGS_9K;

        scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
        cmd->unit = (TW_UINT8)(scsi_req->unit);
        cmd->lun_l4__req_id = TW_CL_SWAP16(
            BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
        cmd->status = 0;
        cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
        tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

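        /*
         * Fill in the scatter/gather list.  The OSL either asks to be
         * called back (TW_CL_REQ_CALLBACK_FOR_SGLIST) to fill the list in
         * place -- e.g. when it still has to map the request for DMA -- or
         * supplies a ready-made list that is copied in via
         * tw_cli_fill_sg_list().
         */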
        if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
                TW_UINT32 num_sgl_entries;

                req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
                    &num_sgl_entries);
                cmd->lun_h4__sgl_entries =
                    TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
                        num_sgl_entries));
        } else {
                cmd->lun_h4__sgl_entries =
                    TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
                        scsi_req->sgl_entries));
                tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
                    cmd->sg_list, scsi_req->sgl_entries);
        }

        if ((error = tw_cli_submit_cmd(req))) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                    "Could not start request. request = %p, error = %d",
                    req, error);
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }
        return(error);
}


/*
 * Function name:	tw_cli_submit_cmd
 * Description:		Submits a cmd to firmware.
 *
 * Input:		req -- ptr to CL internal request context
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
        struct tw_cli_ctlr_context *ctlr = req->ctlr;
        struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle;
        TW_UINT32 status_reg;
        TW_INT32 error;

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        /* Serialize access to the controller cmd queue. */
        tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

        /*
         * For the 9650SE and 9690SA, write the low 4 bytes of the command
         * packet address first; the command is not actually posted until
         * the high 4 bytes are written further down.
         */
        if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
            (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
                tw_osl_write_reg(ctlr_handle,
                    TWA_COMMAND_QUEUE_OFFSET_LOW,
                    (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

        /* Check to see if we can post a command. */
        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
        if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
                goto out;

        if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
                struct tw_cl_req_packet *req_pkt =
                    (struct tw_cl_req_packet *)(req->orig_req);

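                /*
                 * The controller's command queue is full.  Internal requests
                 * and requests the OSL marked TW_CL_REQ_RETRY_ON_BUSY are
                 * parked on the pending queue and retried when the
                 * controller drains (the command interrupt is unmasked
                 * below so tw_cli_submit_pending_queue gets a chance to
                 * run); everything else is errored back to the OSL as busy.
                 */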
                tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
                    "Cmd queue full");

                if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
                    || ((req_pkt) &&
                    (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
                    ) {
                        if (req->state != TW_CLI_REQ_STATE_PENDING) {
                                tw_cli_dbg_printf(2, ctlr_handle,
                                    tw_osl_cur_func(),
                                    "pending internal/ioctl request");
                                req->state = TW_CLI_REQ_STATE_PENDING;
                                tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
                                error = 0;
                                /* Unmask command interrupt. */
                                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                                    TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
                        } else
                                error = TW_OSL_EBUSY;
                } else {
                        tw_osl_ctlr_busy(ctlr_handle, req->req_handle);
                        error = TW_OSL_EBUSY;
                }
        } else {
                tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
                    "Submitting command");

                /* Insert command into busy queue */
                req->state = TW_CLI_REQ_STATE_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

                if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
                    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
                        /* Now write the high 4 bytes */
                        tw_osl_write_reg(ctlr_handle,
                            TWA_COMMAND_QUEUE_OFFSET_HIGH,
                            (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
                } else {
                        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                                /* First write the low 4 bytes, then the high 4. */
                                tw_osl_write_reg(ctlr_handle,
                                    TWA_COMMAND_QUEUE_OFFSET_LOW,
                                    (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
                                tw_osl_write_reg(ctlr_handle,
                                    TWA_COMMAND_QUEUE_OFFSET_HIGH,
                                    (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
                        } else
                                tw_osl_write_reg(ctlr_handle,
                                    TWA_COMMAND_QUEUE_OFFSET,
                                    (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
                }
        }
out:
        tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

        return(error);
}


/*
 * Function name:	tw_cl_fw_passthru
 * Description:		Interface to OS Layer for accepting firmware
 *			passthru requests.
 * Input:		ctlr_handle -- controller handle
 *			req_pkt -- OSL built request packet
 *			req_handle -- request handle
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
    struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
        struct tw_cli_ctlr_context *ctlr;
        struct tw_cli_req_context *req;
        union tw_cl_command_7k *cmd_7k;
        struct tw_cl_command_9k *cmd_9k;
        struct tw_cl_passthru_req_packet *pt_req;
        TW_UINT8 opcode;
        TW_UINT8 sgl_offset;
        TW_VOID *sgl = TW_CL_NULL;
        TW_INT32 error;

        tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

        ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

        if (ctlr->reset_in_progress) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                    "Passthru request during reset: returning busy.");
                return(TW_OSL_EBUSY);
        }

        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                    "Out of request context packets: returning busy");
                return(TW_OSL_EBUSY);
        }

        req_handle->cl_req_ctxt = req;
        req->req_handle = req_handle;
        req->orig_req = req_pkt;
        req->tw_cli_callback = tw_cli_complete_io;

        req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);

        pt_req = &(req_pkt->gen_req_pkt.pt_req);

        tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
            pt_req->cmd_pkt_length);
        /* Build the cmd pkt. */
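        /*
         * The opcode lives in the first byte of the command packet proper,
         * i.e. right past the command header that prefixes it.  SCSI
         * commands (TWA_FW_CMD_EXECUTE_SCSI) use the 9000 series command
         * format; all other passthru commands use the older 7000 series
         * format.
         */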
        if ((opcode = GET_OPCODE(((TW_UINT8 *)
            (pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
            == TWA_FW_CMD_EXECUTE_SCSI) {
                TW_UINT16 lun_l4, lun_h4;

                tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
                    "passthru: 9k cmd pkt");
                req->flags |= TW_CLI_REQ_FLAGS_9K;
                cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
                lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
                lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
                cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
                    BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
                if (pt_req->sgl_entries) {
                        cmd_9k->lun_h4__sgl_entries =
                            TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
                                pt_req->sgl_entries));
                        sgl = (TW_VOID *)(cmd_9k->sg_list);
                }
        } else {
                tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
                    "passthru: 7k cmd pkt");
                cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
                cmd_7k->generic.request_id =
                    (TW_UINT8)(TW_CL_SWAP16(req->request_id));
                if ((sgl_offset =
                    GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
                        if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
                                sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
                        else
                                sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
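                        /*
                         * generic.size is in 32-bit words: each 64-bit SG
                         * descriptor (8-byte address + 4-byte length) adds
                         * 3 words, each 32-bit descriptor (4 + 4 bytes)
                         * adds 2.
                         */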
                        cmd_7k->generic.size += pt_req->sgl_entries *
                            ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
                }
        }

        if (sgl)
                tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
                    sgl, pt_req->sgl_entries);

        if ((error = tw_cli_submit_cmd(req))) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                    0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Failed to start passthru command",
                    "error = %d", error);
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }
        return(error);
}


/*
 * Function name:	tw_cl_ioctl
 * Description:		Handler of CL supported ioctl cmds.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			cmd -- ioctl cmd
 *			buf -- ptr to buffer in kernel memory, which is
 *			       a copy of the input buffer in user-space
 * Output:		buf -- ptr to buffer in kernel memory, which will
 *			       need to be copied to the output buffer in
 *			       user-space
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
{
        struct tw_cli_ctlr_context *ctlr =
            (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
        struct tw_cl_ioctl_packet *user_buf =
            (struct tw_cl_ioctl_packet *)buf;
        struct tw_cl_event_packet event_buf;
        TW_INT32 event_index;
        TW_INT32 start_index;
        TW_INT32 error = TW_OSL_ESUCCESS;

        tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

        /* Serialize access to the AEN queue and the ioctl lock. */
        tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

        switch (cmd) {
        case TW_CL_IOCTL_GET_FIRST_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get First Event");

                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that they missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        } else
                                user_buf->driver_pkt.status = 0;
                        event_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        user_buf->driver_pkt.status = 0;
                        event_index = ctlr->aen_tail; /* = 0 */
                }
                tw_osl_memcpy(user_buf->data_buf,
                    &(ctlr->aen_queue[event_index]),
                    sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_LAST_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get Last Event");

                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that they missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        } else
                                user_buf->driver_pkt.status = 0;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        user_buf->driver_pkt.status = 0;
                }
                event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
                    ctlr->max_aens_supported;

                tw_osl_memcpy(user_buf->data_buf,
                    &(ctlr->aen_queue[event_index]),
                    sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_NEXT_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get Next Event");

                user_buf->driver_pkt.status = 0;
                if (ctlr->aen_q_wrapped) {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                            "Get Next Event: wrapped");
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that they missed out on some AENs.
                                 */
                                tw_cli_dbg_printf(2, ctlr_handle,
                                    tw_osl_cur_func(),
                                    "Get Next Event: overflow");
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        }
                        start_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                tw_cli_dbg_printf(3, ctlr_handle,
                                    tw_osl_cur_func(),
                                    "Get Next Event: empty queue");
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        start_index = ctlr->aen_tail; /* = 0 */
                }
                tw_osl_memcpy(&event_buf, user_buf->data_buf,
                    sizeof(struct tw_cl_event_packet));

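                /*
                 * Compute the queue index of the first event newer than the
                 * one the caller last saw: sequence IDs increase by one per
                 * slot, so offset start_index by the difference between the
                 * caller's sequence_id and the sequence_id at start_index,
                 * plus one, modulo the queue size.
                 */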
                event_index = (start_index + event_buf.sequence_id -
                    ctlr->aen_queue[start_index].sequence_id + 1) %
                    ctlr->max_aens_supported;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get Next Event: si = %x, ei = %x, ebsi = %x, "
                    "sisi = %x, eisi = %x",
                    start_index, event_index, event_buf.sequence_id,
                    ctlr->aen_queue[start_index].sequence_id,
                    ctlr->aen_queue[event_index].sequence_id);

                if (! (ctlr->aen_queue[event_index].sequence_id >
                    event_buf.sequence_id)) {
                        /*
                         * We don't have any event matching the criterion.  So,
                         * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
                         * encountered an overflow condition above, we cannot
                         * report both conditions during this call.  We choose
                         * to report NO_EVENTS this time, and an overflow the
                         * next time we are called.
                         */
                        if (user_buf->driver_pkt.status ==
                            TW_CL_ERROR_AEN_OVERFLOW) {
                                /*
                                 * Make a note so we report the overflow
                                 * next time.
                                 */
                                ctlr->aen_q_overflow = TW_CL_TRUE;
                        }
                        user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
                        break;
                }
                /* Copy the event -- even if there has been an overflow. */
                tw_osl_memcpy(user_buf->data_buf,
                    &(ctlr->aen_queue[event_index]),
                    sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get Previous Event");

                user_buf->driver_pkt.status = 0;
                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that they missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        }
                        start_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                    TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        start_index = ctlr->aen_tail; /* = 0 */
                }
                tw_osl_memcpy(&event_buf, user_buf->data_buf,
                    sizeof(struct tw_cl_event_packet));

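                /*
                 * Mirror image of GET_NEXT_EVENT: offset start_index by the
                 * sequence_id difference minus one, to land on the event
                 * just older than the one the caller passed in.
                 */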
                event_index = (start_index + event_buf.sequence_id -
                    ctlr->aen_queue[start_index].sequence_id - 1) %
                    ctlr->max_aens_supported;

                if (! (ctlr->aen_queue[event_index].sequence_id <
                    event_buf.sequence_id)) {
                        /*
                         * We don't have any event matching the criterion.  So,
                         * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
                         * encountered an overflow condition above, we cannot
                         * report both conditions during this call.  We choose
                         * to report NO_EVENTS this time, and an overflow the
                         * next time we are called.
                         */
                        if (user_buf->driver_pkt.status ==
                            TW_CL_ERROR_AEN_OVERFLOW) {
                                /*
                                 * Make a note so we report the overflow
                                 * next time.
                                 */
                                ctlr->aen_q_overflow = TW_CL_TRUE;
                        }
                        user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
                        break;
                }
                /* Copy the event -- even if there has been an overflow. */
                tw_osl_memcpy(user_buf->data_buf,
                    &(ctlr->aen_queue[event_index]),
                    sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_LOCK:
        {
                struct tw_cl_lock_packet lock_pkt;
                TW_TIME cur_time;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get ioctl lock");

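                /*
                 * The ioctl lock is a cooperative lock, presumably taken by
                 * management applications around sequences of passthru
                 * commands.  It is granted if it is currently free, if the
                 * caller asks to force it, or if the previous holder's
                 * timeout has expired.  Note that ioctl_lock.timeout is
                 * kept in seconds, while the lock packet exchanges timeouts
                 * in milliseconds.
                 */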
                cur_time = tw_osl_get_local_time();
                tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
                    sizeof(struct tw_cl_lock_packet));

                if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
                    (lock_pkt.force_flag) ||
                    (cur_time >= ctlr->ioctl_lock.timeout)) {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                            "GET_LOCK: Getting lock!");
                        ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
                        ctlr->ioctl_lock.timeout =
                            cur_time + (lock_pkt.timeout_msec / 1000);
                        lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
                        user_buf->driver_pkt.status = 0;
                } else {
                        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                            "GET_LOCK: Lock already held!");
                        lock_pkt.time_remaining_msec = (TW_UINT32)(
                            (ctlr->ioctl_lock.timeout - cur_time) * 1000);
                        user_buf->driver_pkt.status =
                            TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
                }
                tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
                    sizeof(struct tw_cl_lock_packet));
                break;
        }


        case TW_CL_IOCTL_RELEASE_LOCK:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Release ioctl lock");

                if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
                        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                            "twa_ioctl: RELEASE_LOCK: Lock not held!");
                        user_buf->driver_pkt.status =
                            TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
                } else {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                            "RELEASE_LOCK: Releasing lock!");
                        ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
                        user_buf->driver_pkt.status = 0;
                }
                break;


        case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
        {
                struct tw_cl_compatibility_packet comp_pkt;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Get compatibility info");

                tw_osl_memcpy(comp_pkt.driver_version,
                    TW_OSL_DRIVER_VERSION_STRING,
                    sizeof(TW_OSL_DRIVER_VERSION_STRING));
                comp_pkt.working_srl = ctlr->working_srl;
                comp_pkt.working_branch = ctlr->working_branch;
                comp_pkt.working_build = ctlr->working_build;
                comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
                comp_pkt.driver_branch_high =
                    TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
                comp_pkt.driver_build_high =
                    TWA_CURRENT_FW_BUILD(ctlr->arch_id);
                comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
                comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
                comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
                comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
                comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
                comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
                user_buf->driver_pkt.status = 0;

                /* Copy compatibility information to user space. */
                tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
                    (sizeof(struct tw_cl_compatibility_packet) <
                    user_buf->driver_pkt.buffer_length) ?
                    sizeof(struct tw_cl_compatibility_packet) :
                    user_buf->driver_pkt.buffer_length);
                break;
        }

        default:
                /* Unknown ioctl command. */
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                    "Unknown ioctl cmd 0x%x", cmd);
                error = TW_OSL_ENOTTY;
        }

        tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
        return(error);
}


/*
 * Function name:	tw_cli_get_param
 * Description:		Get a firmware parameter.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			table_id -- parameter table #
 *			param_id -- index of the parameter in the table
 *			param_size -- size of the parameter in bytes
 *			callback -- ptr to function, if any, to be called
 *				    back on completion; TW_CL_NULL if no callback.
 * Output:		param_data -- param value
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
    TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
    TW_VOID (* callback)(struct tw_cli_req_context *req))
{
        struct tw_cli_req_context *req;
        union tw_cl_command_7k *cmd;
        struct tw_cl_param_9k *param = TW_CL_NULL;
        TW_INT32 error = TW_OSL_EBUSY;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Get a request packet. */
        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
                goto out;

        /* Make sure this is the only CL internal request at this time. */
        if (ctlr->internal_req_busy) {
                error = TW_OSL_EBUSY;
                goto out;
        }
        ctlr->internal_req_busy = TW_CL_TRUE;
        req->data = ctlr->internal_req_data;
        req->data_phys = ctlr->internal_req_data_phys;
        req->length = TW_CLI_SECTOR_SIZE;
        req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

        /* Initialize memory to read data into. */
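        /*
         * The zeroed length is sizeof(struct tw_cl_param_9k) - 1 +
         * param_size because the structure ends in a 1-byte data
         * placeholder that the actual parameter data overlays.
         */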
        param = (struct tw_cl_param_9k *)(req->data);
        tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->param.sgl_off__opcode =
            BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
        cmd->param.request_id =
            (TW_UINT8)(TW_CL_SWAP16(req->request_id));
        cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
        cmd->param.param_count = TW_CL_SWAP16(1);

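        /*
         * cmd->param.size is in 32-bit words: 2 words of fixed command,
         * plus 3 words for a 64-bit SG descriptor or 2 for a 32-bit one.
         */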
        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
                    TW_CL_SWAP64(req->data_phys);
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
                    TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 3;
        } else {
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
                    TW_CL_SWAP32(req->data_phys);
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
                    TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 2;
        }

        /* Specify which parameter we need. */
        param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
        param->parameter_id = (TW_UINT8)(param_id);
        param->parameter_size_bytes = TW_CL_SWAP16(param_size);

        /* Submit the command. */
        if (callback == TW_CL_NULL) {
                /* There's no call back; wait till the command completes. */
                error = tw_cli_submit_and_poll_request(req,
                    TW_CLI_REQUEST_TIMEOUT_PERIOD);
                if (error == TW_OSL_ETIMEDOUT)
                        /* Clean-up done by tw_cli_submit_and_poll_request. */
                        return(error);
                if (error)
                        goto out;
                if ((error = cmd->param.status)) {
                        tw_cli_create_ctlr_event(ctlr,
                            TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                            &(req->cmd_pkt->cmd_hdr));
                        goto out;
                }
                tw_osl_memcpy(param_data, param->data, param_size);
                ctlr->internal_req_busy = TW_CL_FALSE;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        } else {
                /* There's a call back.  Simply submit the command. */
                req->tw_cli_callback = callback;
                if ((error = tw_cli_submit_cmd(req)))
                        goto out;
        }
        return(0);

out:
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
            0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
            "get_param failed",
            "error = %d", error);
        if (param)
                ctlr->internal_req_busy = TW_CL_FALSE;
        if (req)
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        return(1);
}


/*
 * Function name:	tw_cli_set_param
 * Description:		Set a firmware parameter.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			table_id -- parameter table #
 *			param_id -- index of the parameter in the table
 *			param_size -- size of the parameter in bytes
 *			callback -- ptr to function, if any, to be called
 *				    back on completion; TW_CL_NULL if no callback.
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
    TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
    TW_VOID (* callback)(struct tw_cli_req_context *req))
{
        struct tw_cli_req_context *req;
        union tw_cl_command_7k *cmd;
        struct tw_cl_param_9k *param = TW_CL_NULL;
        TW_INT32 error = TW_OSL_EBUSY;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Get a request packet. */
        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
                goto out;

        /* Make sure this is the only CL internal request at this time. */
        if (ctlr->internal_req_busy) {
                error = TW_OSL_EBUSY;
                goto out;
        }
        ctlr->internal_req_busy = TW_CL_TRUE;
        req->data = ctlr->internal_req_data;
        req->data_phys = ctlr->internal_req_data_phys;
        req->length = TW_CLI_SECTOR_SIZE;
        req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

        /* Initialize the memory the data will be sent from. */
        param = (struct tw_cl_param_9k *)(req->data);
        tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->param.sgl_off__opcode =
            BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
        cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
        cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
        cmd->param.param_count = TW_CL_SWAP16(1);

        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
                    TW_CL_SWAP64(req->data_phys);
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
                    TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 3;
        } else {
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
                    TW_CL_SWAP32(req->data_phys);
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
                    TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 2;
        }

        /* Specify which parameter we want to set. */
        param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
        param->parameter_id = (TW_UINT8)(param_id);
        param->parameter_size_bytes = TW_CL_SWAP16(param_size);
        tw_osl_memcpy(param->data, data, param_size);

        /* Submit the command. */
        if (callback == TW_CL_NULL) {
                /* There's no call back; wait till the command completes. */
                error = tw_cli_submit_and_poll_request(req,
                    TW_CLI_REQUEST_TIMEOUT_PERIOD);
                if (error == TW_OSL_ETIMEDOUT)
                        /* Clean-up done by tw_cli_submit_and_poll_request. */
                        return(error);
                if (error)
                        goto out;
                if ((error = cmd->param.status)) {
                        tw_cli_create_ctlr_event(ctlr,
                            TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                            &(req->cmd_pkt->cmd_hdr));
                        goto out;
                }
                ctlr->internal_req_busy = TW_CL_FALSE;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        } else {
                /* There's a call back.  Simply submit the command. */
                req->tw_cli_callback = callback;
                if ((error = tw_cli_submit_cmd(req)))
                        goto out;
        }
        return(error);

out:
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
            0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
            "set_param failed",
            "error = %d", error);
        if (param)
                ctlr->internal_req_busy = TW_CL_FALSE;
        if (req)
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        return(error);
}


/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req -- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
    TW_UINT32 timeout)
{
        struct tw_cli_ctlr_context *ctlr = req->ctlr;
        TW_TIME end_time;
        TW_INT32 error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * If the cmd queue is full, tw_cli_submit_cmd will queue this
         * request in the pending queue, since this is an internal request.
         */
        if ((error = tw_cli_submit_cmd(req))) {
                tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                    0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Failed to start internal request",
                    "error = %d", error);
                return(error);
        }

        /*
         * Poll for the response until the command gets completed, or there's
         * a timeout.
         */
        end_time = tw_osl_get_local_time() + timeout;
        do {
                if ((error = req->error_code))
                        /*
                         * This will take care of completion due to a reset,
                         * or a failure in tw_cli_submit_pending_queue.
                         * The caller should do the clean-up.
                         */
                        return(error);

                /* See if the command completed. */
                tw_cli_process_resp_intr(ctlr);

                if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
                    (req->state != TW_CLI_REQ_STATE_PENDING))
                        return(req->state != TW_CLI_REQ_STATE_COMPLETE);
        } while (tw_osl_get_local_time() <= end_time);

        /* Time out! */
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
            0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
            "Internal request timed out",
            "request = %p", req);

        /*
         * We will reset the controller only if the request has already been
         * submitted, so as to not lose the request packet.  If a busy request
         * timed out, the reset will take care of freeing resources.  If a
         * pending request timed out, we will free resources for that request,
         * right here, thereby avoiding a reset.  So, the caller is expected
         * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
         */

        /*
         * We have to make sure that this timed out request, if it were in the
         * pending queue, doesn't get submitted while we are here, from
         * tw_cli_submit_pending_queue.  There could be a race in that case.
         * Need to revisit.
         */
        if (req->state != TW_CLI_REQ_STATE_PENDING)
                tw_cl_reset_ctlr(ctlr->ctlr_handle);
        else {
                tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
                    "Removing request from pending queue");
                /*
                 * Request was never submitted.  Clean up.  Note that we did
                 * not do a reset.  So, we have to remove the request ourselves
                 * from the pending queue (as against tw_cli_drain_pending_queue
                 * taking care of it).
                 */
                tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
                if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
                        TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
                            TWA_CONTROL_MASK_COMMAND_INTERRUPT);
                if (req->data)
                        ctlr->internal_req_busy = TW_CL_FALSE;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }

        return(TW_OSL_ETIMEDOUT);
}


/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
        struct tw_cli_ctlr_context *ctlr =
            (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
        struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
        TW_INT32 reset_attempt = 1;
        TW_INT32 error;

        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

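        /*
         * Freeze the CAM SIM queue so that no new I/O arrives from the OS
         * while the controller is being reset; the queue is released (and
         * interrupts re-enabled) at 'out' below.
         */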
        ctlr->reset_in_progress = TW_CL_TRUE;
        xpt_freeze_simq(sc->sim, 1);

        tw_cli_disable_interrupts(ctlr);

        /*
         * Error back all requests in the complete, busy, and pending queues.
         * If any request is already on its way to getting submitted, it's in
         * none of these queues and so, will not be completed.  That request
         * will continue its course and get submitted to the controller after
         * the reset is done (and io_lock is released).
         */
        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
            "Draining all queues following reset");
        tw_cli_drain_complete_queue(ctlr);
        tw_cli_drain_busy_queue(ctlr);
        tw_cli_drain_pending_queue(ctlr);
        ctlr->internal_req_busy = TW_CL_FALSE;
        ctlr->get_more_aens = TW_CL_FALSE;

        /* Soft reset the controller. */
try_reset:
        if ((error = tw_cli_soft_reset(ctlr))) {
                tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                    0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Controller reset failed",
                    "error = %d; attempt %d", error, reset_attempt++);
                if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
                        goto try_reset;
                else
                        goto out;
        }

        /* Re-establish logical connection with the controller. */
        if ((error = tw_cli_init_connection(ctlr,
            (TW_UINT16)(ctlr->max_simult_reqs),
            0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
            TW_CL_NULL, TW_CL_NULL))) {
                tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                    0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Can't initialize connection after reset",
                    "error = %d", error);
                goto out;
        }

        tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
            0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
            "Controller reset done!",
            " ");

out:
        ctlr->reset_in_progress = TW_CL_FALSE;
        xpt_release_simq(sc->sim, 1);

        /*
         * Enable interrupts, and also clear attention and response interrupts.
         */
        tw_cli_enable_interrupts(ctlr);

        /* Request for a bus re-scan. */
        if (!error)
                tw_osl_scan_bus(ctlr_handle);
        return(error);
}


/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle;
        TW_UINT32 status_reg;
        int found;
        int loop_count;
        TW_UINT32 error;

        tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

        tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
            0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
            "Resetting controller...",
            " ");

        /* Don't let any new commands get submitted to the controller. */
        tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

        TW_CLI_SOFT_RESET(ctlr_handle);

        if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
            (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
            (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
                /*
                 * There's a hardware bug in the G133 ASIC, which can lead to
                 * PCI parity errors and hangs, if the host accesses any
                 * registers when the firmware is resetting the hardware, as
                 * part of a hard/soft reset.  The window of time when the
                 * problem can occur is about 10 ms.  Here, we will handshake
                 * with the firmware to find out when the firmware is pulling
                 * down the hardware reset pin, and wait for about 500 ms to
                 * make sure we don't access any hardware registers (for
                 * polling) during that window.
                 */
                ctlr->reset_phase1_in_progress = TW_CL_TRUE;
                loop_count = 0;
                do {
                        found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
                        tw_osl_delay(10);
                        loop_count++;
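                        /*
                         * 0x7888 appears to be an arbitrary sentinel error
                         * code; it is what gets returned below if the
                         * handshake response never shows up.
                         */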
                        error = 0x7888;
                } while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */

                if (!found) {
                        tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                            TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                            0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                            "Missed firmware handshake after soft-reset",
                            "error = %d", error);
                        tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
                        return(error);
                }

                tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
                ctlr->reset_phase1_in_progress = TW_CL_FALSE;
        }

        if ((error = tw_cli_poll_status(ctlr,
            TWA_STATUS_MICROCONTROLLER_READY |
            TWA_STATUS_ATTENTION_INTERRUPT,
            TW_CLI_RESET_TIMEOUT_PERIOD))) {
                tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                    0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Micro-ctlr not ready/No attn intr after reset",
                    "error = %d", error);
                tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
                return(error);
        }

        TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
            TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

        if ((error = tw_cli_drain_response_queue(ctlr))) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                    0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Can't drain response queue after reset",
                    "error = %d", error);
                tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
                return(error);
        }

        tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

        if ((error = tw_cli_drain_aen_queue(ctlr))) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                    0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Can't drain AEN queue after reset",
                    "error = %d", error);
                return(error);
        }

        if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
                tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                    0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Reset not reported by controller",
                    "error = %d", error);
                return(error);
        }

        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);

        if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
            (error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
                tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
                    TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
                    0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                    "Controller errors detected after reset",
                    "error = %d", error);
                return(error);
        }

        return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a scsi cmd to fw.
 *
 * Input:		req -- ptr to request pkt
 *			cmd -- opcode of scsi cmd to send
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
        struct tw_cl_command_packet *cmdpkt;
        struct tw_cl_command_9k *cmd9k;
        struct tw_cli_ctlr_context *ctlr;
        TW_INT32 error;

        ctlr = req->ctlr;
        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Make sure this is the only CL internal request at this time. */
        if (ctlr->internal_req_busy)
                return(TW_OSL_EBUSY);
        ctlr->internal_req_busy = TW_CL_TRUE;
        req->data = ctlr->internal_req_data;
        req->data_phys = ctlr->internal_req_data_phys;
        tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
        req->length = TW_CLI_SECTOR_SIZE;

        /* Build the cmd pkt. */
        cmdpkt = req->cmd_pkt;

        cmdpkt->cmd_hdr.header_desc.size_header = 128;

        cmd9k = &(cmdpkt->command.cmd_pkt_9k);

        cmd9k->res__opcode =
            BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
        cmd9k->unit = 0;
        cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
        cmd9k->status = 0;
        cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
        cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

        if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
                    TW_CL_SWAP64(req->data_phys);
                ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
                    TW_CL_SWAP32(req->length);
        } else {
                ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
                    TW_CL_SWAP32(req->data_phys);
                ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
                    TW_CL_SWAP32(req->length);
        }

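        /*
         * Build a 6-byte CDB by hand: byte 0 is the opcode, and byte 4 is
         * the allocation length (128 bytes here, matching the REQUEST SENSE
         * usage this helper is put to by tw_cli_get_aen below).
         */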
        cmd9k->cdb[0] = (TW_UINT8)cmd;
        cmd9k->cdb[4] = 128;

        if ((error = tw_cli_submit_cmd(req)))
                if (error != TW_OSL_EBUSY) {
                        tw_cli_dbg_printf(1, ctlr->ctlr_handle,
                            tw_osl_cur_func(),
                            "Failed to start SCSI command",
                            "request = %p, error = %d", req, error);
                        return(TW_OSL_EIO);
                }
        return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 * Output:		None
 * Return value:	0 -- success
 *			non-zero -- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
        struct tw_cli_req_context *req;
        TW_INT32 error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
                return(TW_OSL_EBUSY);

        req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
        req->flags |= TW_CLI_REQ_FLAGS_9K;
        req->tw_cli_callback = tw_cli_aen_callback;
        if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
                tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
                    "Could not send SCSI command",
                    "request = %p, error = %d", req, error);
                if (req->data)
                        ctlr->internal_req_busy = TW_CL_FALSE;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }
        return(error);
}


/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr -- ptr to per ctlr structure
 *			sgl_src -- ptr to fill the sg list from
 *			sgl_dest -- ptr to sg list
 *			num_sgl_entries -- # of segments
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
    TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
        TW_INT32 i;

        tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                struct tw_cl_sg_desc64 *sgl_s =
                    (struct tw_cl_sg_desc64 *)sgl_src;
                struct tw_cl_sg_desc64 *sgl_d =
                    (struct tw_cl_sg_desc64 *)sgl_dest;

                tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
                    "64 bit addresses");
                for (i = 0; i < num_sgl_entries; i++) {
                        sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
                        sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
                        sgl_s++;
                        if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
                                sgl_s = (struct tw_cl_sg_desc64 *)
                                    (((TW_INT8 *)(sgl_s)) + 4);
                }
        } else {
                struct tw_cl_sg_desc32 *sgl_s =
                    (struct tw_cl_sg_desc32 *)sgl_src;
                struct tw_cl_sg_desc32 *sgl_d =
                    (struct tw_cl_sg_desc32 *)sgl_dest;

                tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
                    "32 bit addresses");
                for (i = 0; i < num_sgl_entries; i++) {
                        sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
                        sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
                }
        }
}