tw_cl_io.c: diff between revisions 208969 and 212008.
Lines marked "-" exist only in 208969 (deleted); lines marked "+" exist only
in 212008 (added); unmarked context lines are common to both revisions and
are shown with their 208969 line numbers.
1/*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
- 27 * $FreeBSD: head/sys/dev/twa/tw_cl_io.c 208969 2010-06-09 21:40:38Z delphij $
+ 27 * $FreeBSD: head/sys/dev/twa/tw_cl_io.c 212008 2010-08-30 19:15:04Z delphij $
28 */
29
30/*
31 * AMCC'S 3ware driver for 9000 series storage controllers.
32 *
33 * Author: Vinod Kashyap
34 * Modifications by: Adam Radford
35 * Modifications by: Manjunath Ranganathaiah
36 */
37
38
39/*
40 * Common Layer I/O functions.
41 */
42
43
44#include "tw_osl_share.h"
45#include "tw_cl_share.h"
46#include "tw_cl_fwif.h"
47#include "tw_cl_ioctl.h"
48#include "tw_cl.h"
49#include "tw_cl_externs.h"
50#include "tw_osl_ioctl.h"
51
52#include <cam/cam.h>
53#include <cam/cam_ccb.h>
54#include <cam/cam_xpt_sim.h>
55
56
57
58/*
59 * Function name: tw_cl_start_io
60 * Description: Interface to OS Layer for accepting SCSI requests.
61 *
62 * Input: ctlr_handle -- controller handle
63 * req_pkt -- OSL built request packet
64 * req_handle -- request handle
65 * Output: None
66 * Return value: 0 -- success
67 * non-zero-- failure
68 */
69TW_INT32
70tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
71 struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
72{
73 struct tw_cli_ctlr_context *ctlr;
74 struct tw_cli_req_context *req;
75 struct tw_cl_command_9k *cmd;
76 struct tw_cl_scsi_req_packet *scsi_req;
- 77 TW_INT32 error;
+ 77 TW_INT32 error = TW_CL_ERR_REQ_SUCCESS;
78
79 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
80
81 ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
82
- 83 if (ctlr->reset_in_progress) {
- 84 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
- 85 "I/O during reset: returning busy.");
- 86 return(TW_OSL_EBUSY);
- 87 }
- 88
89 /*
90 * If working with a firmware version that does not support multiple
91 * luns, and this request is directed at a non-zero lun, error it
92 * back right away.
93 */
94 if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
95 (ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
96 req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
97 TW_CL_ERR_REQ_SCSI_ERROR);
98 req_pkt->tw_osl_callback(req_handle);
99 return(TW_CL_ERR_REQ_SUCCESS);
100 }
101
102 if ((req = tw_cli_get_request(ctlr
103 )) == TW_CL_NULL) {
104 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
105 "Out of request context packets: returning busy");
106 return(TW_OSL_EBUSY);
107 }
108
109 req_handle->cl_req_ctxt = req;
110 req->req_handle = req_handle;
111 req->orig_req = req_pkt;
112 req->tw_cli_callback = tw_cli_complete_io;
113
114 req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
115 req->flags |= TW_CLI_REQ_FLAGS_9K;
116
117 scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
118
119 /* Build the cmd pkt. */
120 cmd = &(req->cmd_pkt->command.cmd_pkt_9k);
121
122 req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
123
124 cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
125 cmd->unit = (TW_UINT8)(scsi_req->unit);
126 cmd->lun_l4__req_id = TW_CL_SWAP16(
127 BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
128 cmd->status = 0;
129 cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
130 tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);
131
132 if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
133 TW_UINT32 num_sgl_entries;
134
135 req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
136 &num_sgl_entries);
137 cmd->lun_h4__sgl_entries =
138 TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
139 num_sgl_entries));
140 } else {
141 cmd->lun_h4__sgl_entries =
142 TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
143 scsi_req->sgl_entries));
144 tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
145 cmd->sg_list, scsi_req->sgl_entries);
146 }
147
- 148 if ((error = tw_cli_submit_cmd(req))) {
+ 142 if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
+ 143 (ctlr->reset_in_progress)) {
+ 144 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
+ 145 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
+ 146 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
+ 147 } else if ((error = tw_cli_submit_cmd(req))) {
149 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
150 "Could not start request. request = %p, error = %d",
151 req, error);
152 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
153 }
154 return(error);
155}
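/*
 * A minimal restatement of the new (212008) submit path in
 * tw_cl_start_io(), with the apparent rationale spelled out as
 * comments; every identifier below is taken from the hunk above.
 */
	if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q])) !=
	    TW_CL_NULL) || (ctlr->reset_in_progress)) {
		/* Requests are already parked, or a reset is under way:
		 * park this one too so commands stay in FIFO order, and
		 * unmask the command interrupt so the pending queue is
		 * drained once the controller can take commands again. */
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		    TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	} else if ((error = tw_cli_submit_cmd(req))) {
		/* Immediate submission failed: give the request back. */
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}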
156
157
158
159/*
160 * Function name: tw_cli_submit_cmd
161 * Description: Submits a cmd to firmware.
162 *
163 * Input: req -- ptr to CL internal request context
164 * Output: None
165 * Return value: 0 -- success
166 * non-zero-- failure
167 */
168TW_INT32
169tw_cli_submit_cmd(struct tw_cli_req_context *req)
170{
171 struct tw_cli_ctlr_context *ctlr = req->ctlr;
172 struct tw_cl_ctlr_handle *ctlr_handle = ctlr->ctlr_handle;
173 TW_UINT32 status_reg;
- 174 TW_INT32 error;
+ 173 TW_INT32 error = 0;
175
176 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
177
178 /* Serialize access to the controller cmd queue. */
179 tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
180
181 /* For 9650SE first write low 4 bytes */
182 if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
183 (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
184 tw_osl_write_reg(ctlr_handle,
185 TWA_COMMAND_QUEUE_OFFSET_LOW,
186 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
187
- 188 /* Check to see if we can post a command. */
189 status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
- 190 if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
- 191 goto out;
- 192
193 if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
194 struct tw_cl_req_packet *req_pkt =
195 (struct tw_cl_req_packet *)(req->orig_req);
196
197 tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
198 "Cmd queue full");
199
200 if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
201 || ((req_pkt) &&
202 (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
203 ) {
204 if (req->state != TW_CLI_REQ_STATE_PENDING) {
205 tw_cli_dbg_printf(2, ctlr_handle,
206 tw_osl_cur_func(),
207 "pending internal/ioctl request");
208 req->state = TW_CLI_REQ_STATE_PENDING;
209 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
- 210 error = 0;
211 /* Unmask command interrupt. */
212 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
213 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
214 } else
215 error = TW_OSL_EBUSY;
216 } else {
- 217 tw_osl_ctlr_busy(ctlr_handle, req->req_handle);
218 error = TW_OSL_EBUSY;
219 }
220 } else {
221 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
222 "Submitting command");
223
224 /* Insert command into busy queue */
225 req->state = TW_CLI_REQ_STATE_BUSY;
226 tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);
227
228 if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
229 (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
230 /* Now write the high 4 bytes */
231 tw_osl_write_reg(ctlr_handle,
232 TWA_COMMAND_QUEUE_OFFSET_HIGH,
233 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
234 } else {
235 if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
236 /* First write the low 4 bytes, then the high 4. */
237 tw_osl_write_reg(ctlr_handle,
238 TWA_COMMAND_QUEUE_OFFSET_LOW,
239 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
240 tw_osl_write_reg(ctlr_handle,
241 TWA_COMMAND_QUEUE_OFFSET_HIGH,
242 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
243 } else
244 tw_osl_write_reg(ctlr_handle,
245 TWA_COMMAND_QUEUE_OFFSET,
246 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
247 }
248 }
- 249out:
+ 242
250 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
251
252 return(error);
253}
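/*
 * A minimal sketch (hypothetical helper, not part of the driver) of
 * the low/high register split tw_cli_submit_cmd() uses to post a
 * 64-bit command address; the register offsets and tw_osl_write_reg()
 * are as used above. On the 9K_E/9K_SA devices the low half is
 * written before the queue-full check and the high half after it.
 */
static void
post_cmd_64(struct tw_cl_ctlr_handle *ctlr_handle, TW_UINT64 cmd_phys)
{
	/* Low 32 bits of the physical command address first... */
	tw_osl_write_reg(ctlr_handle, TWA_COMMAND_QUEUE_OFFSET_LOW,
	    (TW_UINT32)(cmd_phys), 4);
	/* ...then the high 32 bits. */
	tw_osl_write_reg(ctlr_handle, TWA_COMMAND_QUEUE_OFFSET_HIGH,
	    (TW_UINT32)(cmd_phys >> 32), 4);
}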
254
255
256
257/*
258 * Function name: tw_cl_fw_passthru
259 * Description: Interface to OS Layer for accepting firmware
260 * passthru requests.
261 * Input: ctlr_handle -- controller handle
262 * req_pkt -- OSL built request packet
263 * req_handle -- request handle
264 * Output: None
265 * Return value: 0 -- success
266 * non-zero-- failure
267 */
268TW_INT32
269tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
270 struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
271{
272 struct tw_cli_ctlr_context *ctlr;
273 struct tw_cli_req_context *req;
274 union tw_cl_command_7k *cmd_7k;
275 struct tw_cl_command_9k *cmd_9k;
276 struct tw_cl_passthru_req_packet *pt_req;
277 TW_UINT8 opcode;
278 TW_UINT8 sgl_offset;
279 TW_VOID *sgl = TW_CL_NULL;
- 280 TW_INT32 error;
+ 273 TW_INT32 error = TW_CL_ERR_REQ_SUCCESS;
281
282 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
283
284 ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
285
- 286 if (ctlr->reset_in_progress) {
- 287 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
- 288 "Passthru request during reset: returning busy.");
- 289 return(TW_OSL_EBUSY);
- 290 }
- 291
292 if ((req = tw_cli_get_request(ctlr
293 )) == TW_CL_NULL) {
294 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
295 "Out of request context packets: returning busy");
296 return(TW_OSL_EBUSY);
297 }
298
299 req_handle->cl_req_ctxt = req;
300 req->req_handle = req_handle;
301 req->orig_req = req_pkt;
302 req->tw_cli_callback = tw_cli_complete_io;
303
- 304 req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);
+ 291 req->flags |= TW_CLI_REQ_FLAGS_PASSTHRU;
305
306 pt_req = &(req_pkt->gen_req_pkt.pt_req);
307
308 tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
309 pt_req->cmd_pkt_length);
310 /* Build the cmd pkt. */
311 if ((opcode = GET_OPCODE(((TW_UINT8 *)
312 (pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
313 == TWA_FW_CMD_EXECUTE_SCSI) {
314 TW_UINT16 lun_l4, lun_h4;
315
316 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
317 "passthru: 9k cmd pkt");
318 req->flags |= TW_CLI_REQ_FLAGS_9K;
319 cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
320 lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
321 lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
322 cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
323 BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
324 if (pt_req->sgl_entries) {
325 cmd_9k->lun_h4__sgl_entries =
326 TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
327 pt_req->sgl_entries));
328 sgl = (TW_VOID *)(cmd_9k->sg_list);
329 }
330 } else {
331 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
332 "passthru: 7k cmd pkt");
333 cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
334 cmd_7k->generic.request_id =
335 (TW_UINT8)(TW_CL_SWAP16(req->request_id));
336 if ((sgl_offset =
337 GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
338 if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
339 sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
340 else
341 sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
342 cmd_7k->generic.size += pt_req->sgl_entries *
343 ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
344 }
345 }
346
347 if (sgl)
348 tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
349 sgl, pt_req->sgl_entries);
350
- 351 if ((error = tw_cli_submit_cmd(req))) {
+ 338 if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
+ 339 (ctlr->reset_in_progress)) {
+ 340 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
+ 341 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
+ 342 TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
+ 343 } else if ((error = tw_cli_submit_cmd(req))) {
352 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
353 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
354 0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
355 "Failed to start passthru command",
356 "error = %d", error);
357 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
358 }
359 return(error);
360}
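/*
 * Why tw_cl_fw_passthru() grows the 7K 'generic.size' by 3 or 2 words
 * per scatter/gather entry: the size field counts 32-bit words, and
 * each SG descriptor is an address plus a 32-bit length, as cast
 * elsewhere in this file (declarations abbreviated from the CL
 * headers):
 *
 *	struct tw_cl_sg_desc64 { TW_UINT64 address; TW_UINT32 length; };
 *		12 bytes = 3 words, used when TW_CL_64BIT_ADDRESSES is set
 *	struct tw_cl_sg_desc32 { TW_UINT32 address; TW_UINT32 length; };
 *		8 bytes = 2 words otherwise
 */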
361
362
363
364/*
365 * Function name: tw_cl_ioctl
366 * Description: Handler of CL supported ioctl cmds.
367 *
368 * Input: ctlr -- ptr to per ctlr structure
369 * cmd -- ioctl cmd
370 * buf -- ptr to buffer in kernel memory, which is
371 * a copy of the input buffer in user-space
372 * Output: buf -- ptr to buffer in kernel memory, which will
373 * need to be copied to the output buffer in
374 * user-space
375 * Return value: 0 -- success
376 * non-zero-- failure
377 */
378TW_INT32
379tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
380{
381 struct tw_cli_ctlr_context *ctlr =
382 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
383 struct tw_cl_ioctl_packet *user_buf =
384 (struct tw_cl_ioctl_packet *)buf;
385 struct tw_cl_event_packet event_buf;
386 TW_INT32 event_index;
387 TW_INT32 start_index;
388 TW_INT32 error = TW_OSL_ESUCCESS;
389
390 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
391
392 /* Serialize access to the AEN queue and the ioctl lock. */
393 tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);
394
395 switch (cmd) {
396 case TW_CL_IOCTL_GET_FIRST_EVENT:
397 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
398 "Get First Event");
399
400 if (ctlr->aen_q_wrapped) {
401 if (ctlr->aen_q_overflow) {
402 /*
403 * The aen queue has wrapped, even before some
404 * events have been retrieved. Let the caller
405 * know that he missed out on some AEN's.
406 */
407 user_buf->driver_pkt.status =
408 TW_CL_ERROR_AEN_OVERFLOW;
409 ctlr->aen_q_overflow = TW_CL_FALSE;
410 } else
411 user_buf->driver_pkt.status = 0;
412 event_index = ctlr->aen_head;
413 } else {
414 if (ctlr->aen_head == ctlr->aen_tail) {
415 user_buf->driver_pkt.status =
416 TW_CL_ERROR_AEN_NO_EVENTS;
417 break;
418 }
419 user_buf->driver_pkt.status = 0;
420 event_index = ctlr->aen_tail; /* = 0 */
421 }
422 tw_osl_memcpy(user_buf->data_buf,
423 &(ctlr->aen_queue[event_index]),
424 sizeof(struct tw_cl_event_packet));
425
426 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
427
428 break;
429
430
431 case TW_CL_IOCTL_GET_LAST_EVENT:
432 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
433 "Get Last Event");
434
435 if (ctlr->aen_q_wrapped) {
436 if (ctlr->aen_q_overflow) {
437 /*
438 * The aen queue has wrapped, even before some
439 * events have been retrieved. Let the caller
440 * know that he missed out on some AEN's.
441 */
442 user_buf->driver_pkt.status =
443 TW_CL_ERROR_AEN_OVERFLOW;
444 ctlr->aen_q_overflow = TW_CL_FALSE;
445 } else
446 user_buf->driver_pkt.status = 0;
447 } else {
448 if (ctlr->aen_head == ctlr->aen_tail) {
449 user_buf->driver_pkt.status =
450 TW_CL_ERROR_AEN_NO_EVENTS;
451 break;
452 }
453 user_buf->driver_pkt.status = 0;
454 }
455 event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
456 ctlr->max_aens_supported;
457
458 tw_osl_memcpy(user_buf->data_buf,
459 &(ctlr->aen_queue[event_index]),
460 sizeof(struct tw_cl_event_packet));
461
462 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
463
464 break;
465
466
467 case TW_CL_IOCTL_GET_NEXT_EVENT:
468 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
469 "Get Next Event");
470
471 user_buf->driver_pkt.status = 0;
472 if (ctlr->aen_q_wrapped) {
473 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
474 "Get Next Event: wrapped");
475 if (ctlr->aen_q_overflow) {
476 /*
477 * The aen queue has wrapped, even before some
478 * events have been retrieved. Let the caller
479 * know that he missed out on some AEN's.
480 */
481 tw_cli_dbg_printf(2, ctlr_handle,
482 tw_osl_cur_func(),
483 "Get Next Event: overflow");
484 user_buf->driver_pkt.status =
485 TW_CL_ERROR_AEN_OVERFLOW;
486 ctlr->aen_q_overflow = TW_CL_FALSE;
487 }
488 start_index = ctlr->aen_head;
489 } else {
490 if (ctlr->aen_head == ctlr->aen_tail) {
491 tw_cli_dbg_printf(3, ctlr_handle,
492 tw_osl_cur_func(),
493 "Get Next Event: empty queue");
494 user_buf->driver_pkt.status =
495 TW_CL_ERROR_AEN_NO_EVENTS;
496 break;
497 }
498 start_index = ctlr->aen_tail; /* = 0 */
499 }
500 tw_osl_memcpy(&event_buf, user_buf->data_buf,
501 sizeof(struct tw_cl_event_packet));
502
503 event_index = (start_index + event_buf.sequence_id -
504 ctlr->aen_queue[start_index].sequence_id + 1) %
505 ctlr->max_aens_supported;
506
507 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
508 "Get Next Event: si = %x, ei = %x, ebsi = %x, "
509 "sisi = %x, eisi = %x",
510 start_index, event_index, event_buf.sequence_id,
511 ctlr->aen_queue[start_index].sequence_id,
512 ctlr->aen_queue[event_index].sequence_id);
513
514 if (! (ctlr->aen_queue[event_index].sequence_id >
515 event_buf.sequence_id)) {
516 /*
517 * We don't have any event matching the criterion. So,
518 * we have to report TW_CL_ERROR_NO_EVENTS. If we also
519 * encountered an overflow condition above, we cannot
520 * report both conditions during this call. We choose
521 * to report NO_EVENTS this time, and an overflow the
522 * next time we are called.
523 */
524 if (user_buf->driver_pkt.status ==
525 TW_CL_ERROR_AEN_OVERFLOW) {
526 /*
527 * Make a note so we report the overflow
528 * next time.
529 */
530 ctlr->aen_q_overflow = TW_CL_TRUE;
531 }
532 user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
533 break;
534 }
535 /* Copy the event -- even if there has been an overflow. */
536 tw_osl_memcpy(user_buf->data_buf,
537 &(ctlr->aen_queue[event_index]),
538 sizeof(struct tw_cl_event_packet));
539
540 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
541
542 break;
543
544
545 case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
546 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
547 "Get Previous Event");
548
549 user_buf->driver_pkt.status = 0;
550 if (ctlr->aen_q_wrapped) {
551 if (ctlr->aen_q_overflow) {
552 /*
553 * The aen queue has wrapped, even before some
554 * events have been retrieved. Let the caller
555 * know that he missed out on some AEN's.
556 */
557 user_buf->driver_pkt.status =
558 TW_CL_ERROR_AEN_OVERFLOW;
559 ctlr->aen_q_overflow = TW_CL_FALSE;
560 }
561 start_index = ctlr->aen_head;
562 } else {
563 if (ctlr->aen_head == ctlr->aen_tail) {
564 user_buf->driver_pkt.status =
565 TW_CL_ERROR_AEN_NO_EVENTS;
566 break;
567 }
568 start_index = ctlr->aen_tail; /* = 0 */
569 }
570 tw_osl_memcpy(&event_buf, user_buf->data_buf,
571 sizeof(struct tw_cl_event_packet));
572
573 event_index = (start_index + event_buf.sequence_id -
574 ctlr->aen_queue[start_index].sequence_id - 1) %
575 ctlr->max_aens_supported;
576
577 if (! (ctlr->aen_queue[event_index].sequence_id <
578 event_buf.sequence_id)) {
579 /*
580 * We don't have any event matching the criterion. So,
581 * we have to report TW_CL_ERROR_NO_EVENTS. If we also
582 * encountered an overflow condition above, we cannot
583 * report both conditions during this call. We choose
584 * to report NO_EVENTS this time, and an overflow the
585 * next time we are called.
586 */
587 if (user_buf->driver_pkt.status ==
588 TW_CL_ERROR_AEN_OVERFLOW) {
589 /*
590 * Make a note so we report the overflow
591 * next time.
592 */
593 ctlr->aen_q_overflow = TW_CL_TRUE;
594 }
595 user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
596 break;
597 }
598 /* Copy the event -- even if there has been an overflow. */
599 tw_osl_memcpy(user_buf->data_buf,
600 &(ctlr->aen_queue[event_index]),
601 sizeof(struct tw_cl_event_packet));
602
603 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
604
605 break;
606
607
608 case TW_CL_IOCTL_GET_LOCK:
609 {
610 struct tw_cl_lock_packet lock_pkt;
611 TW_TIME cur_time;
612
613 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
614 "Get ioctl lock");
615
616 cur_time = tw_osl_get_local_time();
617 tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
618 sizeof(struct tw_cl_lock_packet));
619
620 if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
621 (lock_pkt.force_flag) ||
622 (cur_time >= ctlr->ioctl_lock.timeout)) {
623 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
624 "GET_LOCK: Getting lock!");
625 ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
626 ctlr->ioctl_lock.timeout =
627 cur_time + (lock_pkt.timeout_msec / 1000);
628 lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
629 user_buf->driver_pkt.status = 0;
630 } else {
631 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
632 "GET_LOCK: Lock already held!");
633 lock_pkt.time_remaining_msec = (TW_UINT32)(
634 (ctlr->ioctl_lock.timeout - cur_time) * 1000);
635 user_buf->driver_pkt.status =
636 TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
637 }
638 tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
639 sizeof(struct tw_cl_lock_packet));
640 break;
641 }
642
643
644 case TW_CL_IOCTL_RELEASE_LOCK:
645 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
646 "Release ioctl lock");
647
648 if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
649 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
650 "twa_ioctl: RELEASE_LOCK: Lock not held!");
651 user_buf->driver_pkt.status =
652 TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
653 } else {
654 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
655 "RELEASE_LOCK: Releasing lock!");
656 ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
657 user_buf->driver_pkt.status = 0;
658 }
659 break;
660
661
662 case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
663 {
664 struct tw_cl_compatibility_packet comp_pkt;
665
666 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
667 "Get compatibility info");
668
669 tw_osl_memcpy(comp_pkt.driver_version,
670 TW_OSL_DRIVER_VERSION_STRING,
671 sizeof(TW_OSL_DRIVER_VERSION_STRING));
672 comp_pkt.working_srl = ctlr->working_srl;
673 comp_pkt.working_branch = ctlr->working_branch;
674 comp_pkt.working_build = ctlr->working_build;
675 comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
676 comp_pkt.driver_branch_high =
677 TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
678 comp_pkt.driver_build_high =
679 TWA_CURRENT_FW_BUILD(ctlr->arch_id);
680 comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
681 comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
682 comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
683 comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
684 comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
685 comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
686 user_buf->driver_pkt.status = 0;
687
688 /* Copy compatibility information to user space. */
689 tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
690 (sizeof(struct tw_cl_compatibility_packet) <
691 user_buf->driver_pkt.buffer_length) ?
692 sizeof(struct tw_cl_compatibility_packet) :
693 user_buf->driver_pkt.buffer_length);
694 break;
695 }
696
697 default:
698 /* Unknown opcode. */
699 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
700 "Unknown ioctl cmd 0x%x", cmd);
701 error = TW_OSL_ENOTTY;
702 }
703
704 tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
705 return(error);
706}
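/*
 * A worked example (made-up numbers) of the GET_NEXT_EVENT index
 * arithmetic in tw_cl_ioctl() above: if the slot at start_index holds
 * sequence_id 100 and the caller passes in sequence_id 104, the first
 * unseen event sits five slots past start_index, modulo the queue
 * size:
 *
 *	event_index = (start_index + 104 - 100 + 1) % ctlr->max_aens_supported;
 *
 * The following check that aen_queue[event_index].sequence_id is
 * greater than the caller's 104 is what rejects stale or wrapped
 * slots and yields TW_CL_ERROR_AEN_NO_EVENTS instead.
 */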
707
708
709
710/*
711 * Function name: tw_cli_get_param
712 * Description: Get a firmware parameter.
713 *
714 * Input: ctlr -- ptr to per ctlr structure
715 * table_id -- parameter table #
716 * param_id -- index of the parameter in the table
717 * param_size -- size of the parameter in bytes
718 * callback -- ptr to function, if any, to be called
719 * back on completion; TW_CL_NULL if no callback.
720 * Output: param_data -- param value
721 * Return value: 0 -- success
722 * non-zero-- failure
723 */
724TW_INT32
725tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
726 TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
727 TW_VOID (* callback)(struct tw_cli_req_context *req))
728{
729 struct tw_cli_req_context *req;
730 union tw_cl_command_7k *cmd;
731 struct tw_cl_param_9k *param = TW_CL_NULL;
732 TW_INT32 error = TW_OSL_EBUSY;
733
734 tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
735
736 /* Get a request packet. */
737 if ((req = tw_cli_get_request(ctlr
738 )) == TW_CL_NULL)
739 goto out;
740
741 /* Make sure this is the only CL internal request at this time. */
742 if (ctlr->internal_req_busy) {
743 error = TW_OSL_EBUSY;
744 goto out;
745 }
746 ctlr->internal_req_busy = TW_CL_TRUE;
747 req->data = ctlr->internal_req_data;
748 req->data_phys = ctlr->internal_req_data_phys;
749 req->length = TW_CLI_SECTOR_SIZE;
750 req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
751
752 /* Initialize memory to read data into. */
753 param = (struct tw_cl_param_9k *)(req->data);
754 tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
755
756 /* Build the cmd pkt. */
757 cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
758
759 req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
760
761 cmd->param.sgl_off__opcode =
762 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
- 763 cmd->param.request_id =
- 764 (TW_UINT8)(TW_CL_SWAP16(req->request_id));
+ 755 cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
765 cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
766 cmd->param.param_count = TW_CL_SWAP16(1);
767
768 if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
769 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
770 TW_CL_SWAP64(req->data_phys);
771 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
772 TW_CL_SWAP32(req->length);
773 cmd->param.size = 2 + 3;
774 } else {
775 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
776 TW_CL_SWAP32(req->data_phys);
777 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
778 TW_CL_SWAP32(req->length);
779 cmd->param.size = 2 + 2;
780 }
781
782 /* Specify which parameter we need. */
783 param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
784 param->parameter_id = (TW_UINT8)(param_id);
785 param->parameter_size_bytes = TW_CL_SWAP16(param_size);
786
787 /* Submit the command. */
788 if (callback == TW_CL_NULL) {
789 /* There's no call back; wait till the command completes. */
790 error = tw_cli_submit_and_poll_request(req,
791 TW_CLI_REQUEST_TIMEOUT_PERIOD);
- 792 if (error == TW_OSL_ETIMEDOUT)
- 793 /* Clean-up done by tw_cli_submit_and_poll_request. */
- 794 return(error);
795 if (error)
796 goto out;
797 if ((error = cmd->param.status)) {
+ 786#if 0
798 tw_cli_create_ctlr_event(ctlr,
799 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
800 &(req->cmd_pkt->cmd_hdr));
+ 790#endif // 0
801 goto out;
802 }
803 tw_osl_memcpy(param_data, param->data, param_size);
804 ctlr->internal_req_busy = TW_CL_FALSE;
805 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
806 } else {
807 /* There's a call back. Simply submit the command. */
808 req->tw_cli_callback = callback;
809 if ((error = tw_cli_submit_cmd(req)))
810 goto out;
811 }
812 return(0);
813
814out:
815 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
816 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
817 0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
818 "get_param failed",
819 "error = %d", error);
820 if (param)
821 ctlr->internal_req_busy = TW_CL_FALSE;
822 if (req)
823 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
824 return(1);
825}
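/*
 * A hypothetical polled use of tw_cli_get_param(); the table and
 * parameter ids below are placeholders, not real firmware ids. A
 * TW_CL_NULL callback makes the call synchronous: the command is
 * submitted and polled, and the value is copied out of param->data
 * before the request is freed.
 */
	TW_INT32	my_table_id = 0x200;	/* placeholder */
	TW_INT32	my_param_id = 0x2;	/* placeholder */
	TW_UINT16	value;

	if (tw_cli_get_param(ctlr, my_table_id, my_param_id,
	    &value, sizeof(value), TW_CL_NULL))
		/* Failed; an 0x1101 "get_param failed" event was posted. */
		return(1);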
826
827
828
829/*
830 * Function name: tw_cli_set_param
831 * Description: Set a firmware parameter.
832 *
833 * Input: ctlr -- ptr to per ctlr structure
834 * table_id -- parameter table #
835 * param_id -- index of the parameter in the table
836 * param_size -- size of the parameter in bytes
837 * callback -- ptr to function, if any, to be called
838 * back on completion; TW_CL_NULL if no callback.
839 * Output: None
840 * Return value: 0 -- success
841 * non-zero-- failure
842 */
843TW_INT32
844tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
845 TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
846 TW_VOID (* callback)(struct tw_cli_req_context *req))
847{
848 struct tw_cli_req_context *req;
849 union tw_cl_command_7k *cmd;
850 struct tw_cl_param_9k *param = TW_CL_NULL;
851 TW_INT32 error = TW_OSL_EBUSY;
852
853 tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
854
855 /* Get a request packet. */
856 if ((req = tw_cli_get_request(ctlr
857 )) == TW_CL_NULL)
858 goto out;
859
860 /* Make sure this is the only CL internal request at this time. */
861 if (ctlr->internal_req_busy) {
862 error = TW_OSL_EBUSY;
863 goto out;
864 }
865 ctlr->internal_req_busy = TW_CL_TRUE;
866 req->data = ctlr->internal_req_data;
867 req->data_phys = ctlr->internal_req_data_phys;
868 req->length = TW_CLI_SECTOR_SIZE;
869 req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
870
871 /* Initialize memory to send data using. */
872 param = (struct tw_cl_param_9k *)(req->data);
873 tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
874
875 /* Build the cmd pkt. */
876 cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
877
878 req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
879
880 cmd->param.sgl_off__opcode =
881 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
882 cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
883 cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
884 cmd->param.param_count = TW_CL_SWAP16(1);
885
886 if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
887 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
888 TW_CL_SWAP64(req->data_phys);
889 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
890 TW_CL_SWAP32(req->length);
891 cmd->param.size = 2 + 3;
892 } else {
893 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
894 TW_CL_SWAP32(req->data_phys);
895 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
896 TW_CL_SWAP32(req->length);
897 cmd->param.size = 2 + 2;
898 }
899
900 /* Specify which parameter we want to set. */
901 param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
902 param->parameter_id = (TW_UINT8)(param_id);
903 param->parameter_size_bytes = TW_CL_SWAP16(param_size);
904 tw_osl_memcpy(param->data, data, param_size);
905
906 /* Submit the command. */
907 if (callback == TW_CL_NULL) {
791 goto out;
792 }
793 tw_osl_memcpy(param_data, param->data, param_size);
794 ctlr->internal_req_busy = TW_CL_FALSE;
795 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
796 } else {
797 /* There's a call back. Simply submit the command. */
798 req->tw_cli_callback = callback;
799 if ((error = tw_cli_submit_cmd(req)))
800 goto out;
801 }
802 return(0);
803
804out:
805 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
806 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
807 0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
808 "get_param failed",
809 "error = %d", error);
810 if (param)
811 ctlr->internal_req_busy = TW_CL_FALSE;
812 if (req)
813 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
814 return(1);
815}
816
817
818
819/*
820 * Function name: tw_cli_set_param
821 * Description: Set a firmware parameter.
822 *
823 * Input: ctlr -- ptr to per ctlr structure
824 * table_id -- parameter table #
825 * param_id -- index of the parameter in the table
826 * param_size -- size of the parameter in bytes
827 * callback -- ptr to function, if any, to be called
828 * back on completion; TW_CL_NULL if no callback.
829 * Output: None
830 * Return value: 0 -- success
831 * non-zero-- failure
832 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
	TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
	TW_VOID (* callback)(struct tw_cli_req_context *req))
{
	struct tw_cli_req_context	*req;
	union tw_cl_command_7k		*cmd;
	struct tw_cl_param_9k		*param = TW_CL_NULL;
	TW_INT32			error = TW_OSL_EBUSY;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Get a request packet. */
	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		goto out;

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy) {
		error = TW_OSL_EBUSY;
		goto out;
	}
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	req->length = TW_CLI_SECTOR_SIZE;
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

	/* Initialize memory to send data using. */
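	/*
	 * The "- 1 + param_size" sizing below accounts for the one-byte
	 * data[] placeholder at the end of struct tw_cl_param_9k, so the
	 * zeroed block covers the actual parameter payload.
	 */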
	param = (struct tw_cl_param_9k *)(req->data);
	tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

	/* Build the cmd pkt. */
	cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

	req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

	cmd->param.sgl_off__opcode =
		BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
	cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
	cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
	cmd->param.param_count = TW_CL_SWAP16(1);

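	/*
	 * The size field below appears to count 32-bit words: 2 for the
	 * fixed part of the 7000-series param command, plus 3 for a 64-bit
	 * SG descriptor (two-word address, one-word length) or 2 for a
	 * 32-bit one.  The TW_CL_SWAP* macros keep the packet little-endian
	 * regardless of host byte order.
	 */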
	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 3;
	} else {
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
			TW_CL_SWAP32(req->length);
		cmd->param.size = 2 + 2;
	}

	/* Specify which parameter we want to set. */
	param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
	param->parameter_id = (TW_UINT8)(param_id);
	param->parameter_size_bytes = TW_CL_SWAP16(param_size);
	tw_osl_memcpy(param->data, data, param_size);

	/* Submit the command. */
	if (callback == TW_CL_NULL) {
		/* There's no call back; wait till the command completes. */
		error = tw_cli_submit_and_poll_request(req,
			TW_CLI_REQUEST_TIMEOUT_PERIOD);
		if (error)
			goto out;
		if ((error = cmd->param.status)) {
#if 0
			tw_cli_create_ctlr_event(ctlr,
				TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
				&(req->cmd_pkt->cmd_hdr));
#endif // 0
			goto out;
		}
		ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	} else {
		/* There's a call back.  Simply submit the command. */
		req->tw_cli_callback = callback;
		if ((error = tw_cli_submit_cmd(req)))
			goto out;
	}
	return(error);

out:
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"set_param failed",
		"error = %d", error);
	if (param)
		ctlr->internal_req_busy = TW_CL_FALSE;
	if (req)
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_cli_submit_and_poll_request
 * Description:		Sends down a firmware cmd, and waits for the completion
 *			in a tight loop.
 *
 * Input:		req	-- ptr to request pkt
 *			timeout -- max # of seconds to wait before giving up
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
	TW_UINT32 timeout)
{
	struct tw_cli_ctlr_context	*ctlr = req->ctlr;
	TW_TIME				end_time;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/*
	 * If the cmd queue is full, tw_cli_submit_cmd will queue this
	 * request in the pending queue, since this is an internal request.
	 */
	if ((error = tw_cli_submit_cmd(req))) {
		tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Failed to start internal request",
			"error = %d", error);
		return(error);
	}

	/*
	 * Poll for the response until the command gets completed, or there's
	 * a timeout.
	 */
	end_time = tw_osl_get_local_time() + timeout;
	do {
		if ((error = req->error_code))
			/*
			 * This will take care of completion due to a reset,
			 * or a failure in tw_cli_submit_pending_queue.  The
			 * caller should do the clean-up.
			 */
			return(error);

		/* See if the command completed. */
		tw_cli_process_resp_intr(ctlr);

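		/*
		 * Any state other than busy or pending is terminal here:
		 * 0 is returned only if the request actually reached the
		 * complete state.
		 */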
		if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
			(req->state != TW_CLI_REQ_STATE_PENDING))
			return(req->state != TW_CLI_REQ_STATE_COMPLETE);
	} while (tw_osl_get_local_time() <= end_time);

	/* Time out! */
	tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
		0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
		"Internal request timed out",
		"request = %p", req);

	/*
	 * A controller reset is warranted only if the request has already
	 * been submitted, so as to not lose the request packet.  If a busy
	 * request timed out, the ensuing reset (triggered outside this
	 * function) will take care of freeing resources.  If a pending
	 * request timed out, we will free resources for that request, right
	 * here, thereby avoiding a reset.  So, the caller is expected to NOT
	 * cleanup when TW_OSL_ETIMEDOUT is returned.
	 */

	/*
	 * We have to make sure that this timed out request, if it were in the
	 * pending queue, doesn't get submitted while we are here, from
	 * tw_cli_submit_pending_queue.  There could be a race in that case.
	 * Need to revisit.
	 */
	if (req->state == TW_CLI_REQ_STATE_PENDING) {
		tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Removing request from pending queue");
		/*
		 * Request was never submitted.  Clean up.  Note that we did
		 * not do a reset.  So, we have to remove the request
		 * ourselves from the pending queue (as opposed to
		 * tw_cli_drain_pending_queue taking care of it).
		 */
		tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
		if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
			TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
				TWA_CONTROL_MASK_COMMAND_INTERRUPT);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}

	return(TW_OSL_ETIMEDOUT);
}



/*
 * Function name:	tw_cl_reset_ctlr
 * Description:		Soft resets and then initializes the controller;
 *			drains any incomplete requests.
 *
 * Input:		ctlr_handle -- ptr to controller handle
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	struct tw_cli_req_context	*req;
	TW_INT32			reset_attempt = 1;
	TW_INT32			error = 0;

	tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");

	ctlr->reset_in_progress = TW_CL_TRUE;
	twa_teardown_intr(sc);
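	/*
	 * With the OSL interrupt handler torn down, the reset sequence below
	 * presumably runs fully polled; interrupt delivery is re-established
	 * near the end of this function via twa_setup_intr().
	 */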

	/*
	 * Error back all requests in the complete, busy, and pending queues.
	 * If any request is already on its way to getting submitted, it's in
	 * none of these queues and so, will not be completed.  That request
	 * will continue its course and get submitted to the controller after
	 * the reset is done (and io_lock is released).
	 */
	tw_cli_drain_complete_queue(ctlr);
	tw_cli_drain_busy_queue(ctlr);
	tw_cli_drain_pending_queue(ctlr);
	ctlr->internal_req_busy = TW_CL_FALSE;
	ctlr->get_more_aens = TW_CL_FALSE;

	/* Soft reset the controller. */
	while (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS) {
		if ((error = tw_cli_soft_reset(ctlr))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Controller reset failed",
				"error = %d; attempt %d", error, reset_attempt);
			/* Count each failed attempt exactly once. */
			reset_attempt++;
			continue;
		}

		/* Re-establish logical connection with the controller. */
		if ((error = tw_cli_init_connection(ctlr,
				(TW_UINT16)(ctlr->max_simult_reqs),
				0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
				TW_CL_NULL, TW_CL_NULL))) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Can't initialize connection after reset",
				"error = %d", error);
			reset_attempt++;
			continue;
		}

#ifdef TW_OSL_DEBUG
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
			"Controller reset done!", " ");
#endif /* TW_OSL_DEBUG */
		break;
	} /* End of while */

	/* Move commands from the reset queue to the pending queue. */
	while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_RESET_Q)) != TW_CL_NULL) {
		tw_osl_timeout(req->req_handle);
		tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
	}

	twa_setup_intr(sc);
	tw_cli_enable_interrupts(ctlr);
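	/*
	 * If anything was re-queued above (e.g. the timed-out commands moved
	 * from the reset queue), unmask the command interrupt so the pending
	 * queue gets drained.
	 */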
	if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL)
		TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
			TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
	ctlr->reset_in_progress = TW_CL_FALSE;
	ctlr->reset_needed = TW_CL_FALSE;

	/* Request for a bus re-scan. */
	tw_osl_scan_bus(ctlr_handle);

	return(error);
}

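/*
 * Function name:	tw_cl_set_reset_needed
 * Description:		Marks the controller as needing a reset.
 *
 * Input:		ctlr_handle -- ptr to controller handle
 * Output:		None
 * Return value:	None
 */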
TW_VOID
tw_cl_set_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	ctlr->reset_needed = TW_CL_TRUE;
}

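/*
 * Function name:	tw_cl_is_reset_needed
 * Description:		Reports whether a controller reset has been requested.
 *
 * Input:		ctlr_handle -- ptr to controller handle
 * Output:		None
 * Return value:	reset_needed flag
 */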
TW_INT32
tw_cl_is_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->reset_needed);
}

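/*
 * Function name:	tw_cl_is_active
 * Description:		Reports whether the controller is active.
 *
 * Input:		ctlr_handle -- ptr to controller handle
 * Output:		None
 * Return value:	active flag
 */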
TW_INT32
tw_cl_is_active(struct tw_cl_ctlr_handle *ctlr_handle)
{
	struct tw_cli_ctlr_context	*ctlr =
		(struct tw_cli_ctlr_context *)
			(ctlr_handle->cl_ctlr_ctxt);

	return(ctlr->active);
}


/*
 * Function name:	tw_cli_soft_reset
 * Description:		Does the actual soft reset.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cl_ctlr_handle	*ctlr_handle = ctlr->ctlr_handle;
	int				found;
	int				loop_count;
	TW_UINT32			error;

	tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");

	tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
		TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
		0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
		"Resetting controller...",
		" ");

	/* Don't let any new commands get submitted to the controller. */
	tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

	TW_CLI_SOFT_RESET(ctlr_handle);

	if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
		(ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
		(ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
		/*
		 * There's a hardware bug in the G133 ASIC, which can lead to
		 * PCI parity errors and hangs, if the host accesses any
		 * registers when the firmware is resetting the hardware, as
		 * part of a hard/soft reset.  The window of time when the
		 * problem can occur is about 10 ms.  Here, we will handshake
		 * with the firmware to find out when the firmware is pulling
		 * down the hardware reset pin, and wait for about 500 ms to
		 * make sure we don't access any hardware registers (for
		 * polling) during that window.
		 */
		ctlr->reset_phase1_in_progress = TW_CL_TRUE;
		loop_count = 0;
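		/*
		 * 6,000,000 iterations with a 10-microsecond delay each
		 * bounds the handshake wait at roughly 60 seconds.  The
		 * 0x7888 value appears to be an opaque code, reported in the
		 * event below if the handshake is never seen.
		 */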
		do {
			found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
			tw_osl_delay(10);
			loop_count++;
			error = 0x7888;
		} while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */

		if (!found) {
			tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
				TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
				0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
				"Missed firmware handshake after soft-reset",
				"error = %d", error);
			tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
			return(error);
		}

		tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
		ctlr->reset_phase1_in_progress = TW_CL_FALSE;
	}

	if ((error = tw_cli_poll_status(ctlr,
			TWA_STATUS_MICROCONTROLLER_READY |
			TWA_STATUS_ATTENTION_INTERRUPT,
			TW_CLI_RESET_TIMEOUT_PERIOD))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Micro-ctlr not ready/No attn intr after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
		TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);

	if ((error = tw_cli_drain_response_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain response queue after reset",
			"error = %d", error);
		tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
		return(error);
	}

	tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

	if ((error = tw_cli_drain_aen_queue(ctlr))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
			0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Can't drain AEN queue after reset",
			"error = %d", error);
		return(error);
	}

	if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
		tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
			TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
			0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
			"Reset not reported by controller",
			"error = %d", error);
		return(error);
	}

	return(TW_OSL_ESUCCESS);
}



/*
 * Function name:	tw_cli_send_scsi_cmd
 * Description:		Sends down a scsi cmd to fw.
 *
 * Input:		req	-- ptr to request pkt
 *			cmd	-- opcode of scsi cmd to send
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
{
	struct tw_cl_command_packet	*cmdpkt;
	struct tw_cl_command_9k		*cmd9k;
	struct tw_cli_ctlr_context	*ctlr;
	TW_INT32			error;

	ctlr = req->ctlr;
	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	/* Make sure this is the only CL internal request at this time. */
	if (ctlr->internal_req_busy)
		return(TW_OSL_EBUSY);
	ctlr->internal_req_busy = TW_CL_TRUE;
	req->data = ctlr->internal_req_data;
	req->data_phys = ctlr->internal_req_data_phys;
	tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
	req->length = TW_CLI_SECTOR_SIZE;

	/* Build the cmd pkt. */
	cmdpkt = req->cmd_pkt;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);

	cmd9k->res__opcode =
		BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
	cmd9k->unit = 0;
	cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
	cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);

	if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP64(req->data_phys);
		((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	} else {
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
			TW_CL_SWAP32(req->data_phys);
		((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
			TW_CL_SWAP32(req->length);
	}

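	/*
	 * Only cdb[0] (the opcode) and cdb[4] are filled in.  For the
	 * REQUEST SENSE-style commands issued through this path, byte 4 is
	 * the allocation length, capped here at 128 bytes.
	 */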
	cmd9k->cdb[0] = (TW_UINT8)cmd;
	cmd9k->cdb[4] = 128;

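	/*
	 * A TW_OSL_EBUSY result from tw_cli_submit_cmd is deliberately not
	 * reported as an I/O error here; for an internal request it appears
	 * to mean the command was held on the pending queue for later
	 * submission rather than dropped.
	 */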
	if ((error = tw_cli_submit_cmd(req)))
		if (error != TW_OSL_EBUSY) {
			tw_cli_dbg_printf(1, ctlr->ctlr_handle,
				tw_osl_cur_func(),
				"Failed to start SCSI command",
				"request = %p, error = %d", req, error);
			return(TW_OSL_EIO);
		}
	return(TW_OSL_ESUCCESS);
}


/*
 * Function name:	tw_cli_get_aen
 * Description:		Sends down a Request Sense cmd to fw to fetch an AEN.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
{
	struct tw_cli_req_context	*req;
	TW_INT32			error;

	tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
		return(TW_OSL_EBUSY);

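	/*
	 * AENs are fetched with an internal, 9000-series REQUEST SENSE
	 * command; tw_cli_aen_callback picks the request up on completion.
	 */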
	req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
	req->flags |= TW_CLI_REQ_FLAGS_9K;
	req->tw_cli_callback = tw_cli_aen_callback;
	if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
		tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
			"Could not send SCSI command",
			"request = %p, error = %d", req, error);
		if (req->data)
			ctlr->internal_req_busy = TW_CL_FALSE;
		tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
	}
	return(error);
}


/*
 * Function name:	tw_cli_fill_sg_list
 * Description:		Fills in the scatter/gather list.
 *
 * Input:		ctlr	-- ptr to per ctlr structure
 *			sgl_src	-- ptr to fill the sg list from
 *			sgl_dest-- ptr to sg list
 *			num_sgl_entries -- # of SG descriptors to copy
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
	TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
{
	TW_INT32	i;

	tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

	if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
		struct tw_cl_sg_desc64 *sgl_s =
			(struct tw_cl_sg_desc64 *)sgl_src;
		struct tw_cl_sg_desc64 *sgl_d =
			(struct tw_cl_sg_desc64 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"64 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
			sgl_s++;
			if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
				sgl_s = (struct tw_cl_sg_desc64 *)
					(((TW_INT8 *)(sgl_s)) + 4);
		}
	} else {
		struct tw_cl_sg_desc32 *sgl_s =
			(struct tw_cl_sg_desc32 *)sgl_src;
		struct tw_cl_sg_desc32 *sgl_d =
			(struct tw_cl_sg_desc32 *)sgl_dest;

		tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
			"32 bit addresses");
		for (i = 0; i < num_sgl_entries; i++) {
			sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
			sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
		}
	}
}