tw_osl_freebsd.c (revision 169452) vs. tw_osl_freebsd.c (revision 172496)
1/*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
1/*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 169452 2007-05-10 15:36:58Z scottl $
29 * $FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 172496 2007-10-09 17:43:57Z scottl $
30 */
31
32/*
 33 * AMCC's 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 * Modifications by: Adam Radford
30 */
31
32/*
 33 * AMCC's 3ware driver for 9000 series storage controllers.
34 *
35 * Author: Vinod Kashyap
36 * Modifications by: Adam Radford
37 * Modifications by: Manjunath Ranganathaiah
37 */
38
39
40/*
41 * FreeBSD specific functions not related to CAM, and other
42 * miscellaneous functions.
43 */
44
45
46#include <dev/twa/tw_osl_includes.h>
47#include <dev/twa/tw_cl_fwif.h>
48#include <dev/twa/tw_cl_ioctl.h>
49#include <dev/twa/tw_osl_ioctl.h>
50
51#ifdef TW_OSL_DEBUG
52TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
53TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
54#endif /* TW_OSL_DEBUG */
55
56MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
57
58
59static d_open_t twa_open;
60static d_close_t twa_close;
61static d_ioctl_t twa_ioctl;
62
63static struct cdevsw twa_cdevsw = {
64 .d_version = D_VERSION,
65 .d_open = twa_open,
66 .d_close = twa_close,
67 .d_ioctl = twa_ioctl,
68 .d_name = "twa",
69};
70
71static devclass_t twa_devclass;
72
73
74/*
75 * Function name: twa_open
76 * Description: Called when the controller is opened.
77 * Simply marks the controller as open.
78 *
79 * Input: dev -- control device corresponding to the ctlr
80 * flags -- mode of open
81 * fmt -- device type (character/block etc.)
82 * proc -- current process
83 * Output: None
84 * Return value: 0 -- success
85 * non-zero-- failure
86 */
87static TW_INT32
88twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
89{
90 TW_INT32 unit = minor(dev);
91 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
92
93 tw_osli_dbg_dprintf(5, sc, "entered");
94 sc->state |= TW_OSLI_CTLR_STATE_OPEN;
95 return(0);
96}
97
98
99
100/*
101 * Function name: twa_close
102 * Description: Called when the controller is closed.
103 * Simply marks the controller as not open.
104 *
105 * Input: dev -- control device corresponding to the ctlr
106 * flags -- mode of corresponding open
107 * fmt -- device type (character/block etc.)
108 * proc -- current process
109 * Output: None
110 * Return value: 0 -- success
111 * non-zero-- failure
112 */
113static TW_INT32
114twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
115{
116 TW_INT32 unit = minor(dev);
117 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
118
119 tw_osli_dbg_dprintf(5, sc, "entered");
120 sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
121 return(0);
122}
123
124
125
126/*
127 * Function name: twa_ioctl
128 * Description: Called when an ioctl is posted to the controller.
129 * Handles any OS Layer specific cmds, passes the rest
130 * on to the Common Layer.
131 *
132 * Input: dev -- control device corresponding to the ctlr
133 * cmd -- ioctl cmd
134 * buf -- ptr to buffer in kernel memory, which is
135 * a copy of the input buffer in user-space
136 * flags -- mode of corresponding open
137 * proc -- current process
138 * Output: buf -- ptr to buffer in kernel memory, which will
139 * be copied to the output buffer in user-space
140 * Return value: 0 -- success
141 * non-zero-- failure
142 */
143static TW_INT32
144twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
145{
146 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
147 TW_INT32 error;
148
149 tw_osli_dbg_dprintf(5, sc, "entered");
150
151 switch (cmd) {
152 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
153 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
154 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
155 break;
156
157 case TW_OSL_IOCTL_SCAN_BUS:
158 /* Request CAM for a bus scan. */
159 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
160 error = tw_osli_request_bus_scan(sc);
161 break;
162
163 default:
164 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
165 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
166 break;
167 }
168 return(error);
169}
170
171
172
173static TW_INT32 twa_probe(device_t dev);
174static TW_INT32 twa_attach(device_t dev);
175static TW_INT32 twa_detach(device_t dev);
176static TW_INT32 twa_shutdown(device_t dev);
177static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
178#ifdef TW_OSLI_DEFERRED_INTR_USED
179static int twa_pci_intr_fast(TW_VOID *arg);
180static TW_VOID twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
181#else
182static TW_VOID twa_pci_intr(TW_VOID *arg);
183#endif /* TW_OSLI_DEFERRED_INTR_USED */
184
185static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
186static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
187
188static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190static TW_VOID twa_map_load_callback(TW_VOID *arg,
191 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
192
193
194static device_method_t twa_methods[] = {
195 /* Device interface */
196 DEVMETHOD(device_probe, twa_probe),
197 DEVMETHOD(device_attach, twa_attach),
198 DEVMETHOD(device_detach, twa_detach),
199 DEVMETHOD(device_shutdown, twa_shutdown),
200
201 DEVMETHOD(bus_print_child, bus_generic_print_child),
202 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
203 {0, 0}
204};
205
206static driver_t twa_pci_driver = {
207 "twa",
208 twa_methods,
209 sizeof(struct twa_softc)
210};
211
212DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
213MODULE_DEPEND(twa, cam, 1, 1, 1);
214MODULE_DEPEND(twa, pci, 1, 1, 1);
215
216
217/*
218 * Function name: twa_probe
219 * Description: Called at driver load time. Claims 9000 ctlrs.
220 *
221 * Input: dev -- bus device corresponding to the ctlr
222 * Output: None
223 * Return value: <= 0 -- success
224 * > 0 -- failure
225 */
226static TW_INT32
227twa_probe(device_t dev)
228{
229 static TW_UINT8 first_ctlr = 1;
230
231 tw_osli_dbg_printf(3, "entered");
232
233 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
234 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
235 /* Print the driver version only once. */
236 if (first_ctlr) {
237 printf("3ware device driver for 9000 series storage "
238 "controllers, version: %s\n",
239 TW_OSL_DRIVER_VERSION_STRING);
240 first_ctlr = 0;
241 }
242 return(0);
243 }
244 return(ENXIO);
245}
246
247
248
249/*
250 * Function name: twa_attach
251 * Description: Allocates pci resources; updates sc; adds a node to the
252 * sysctl tree to expose the driver version; makes calls
253 * (to the Common Layer) to initialize ctlr, and to
254 * attach to CAM.
255 *
256 * Input: dev -- bus device corresponding to the ctlr
257 * Output: None
258 * Return value: 0 -- success
259 * non-zero-- failure
260 */
261static TW_INT32
262twa_attach(device_t dev)
263{
264 struct twa_softc *sc = device_get_softc(dev);
265 TW_UINT32 command;
266 TW_INT32 bar_num;
267 TW_INT32 bar0_offset;
268 TW_INT32 bar_size;
269 TW_INT32 error;
270
271 tw_osli_dbg_dprintf(3, sc, "entered");
272
273 sc->ctlr_handle.osl_ctlr_ctxt = sc;
274
275 /* Initialize the softc structure. */
276 sc->bus_dev = dev;
277 sc->device_id = pci_get_device(dev);
278
279 /* Initialize the mutexes right here. */
280 sc->io_lock = &(sc->io_lock_handle);
281 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
282 sc->q_lock = &(sc->q_lock_handle);
283 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
38 */
39
40
41/*
42 * FreeBSD specific functions not related to CAM, and other
43 * miscellaneous functions.
44 */
45
46
47#include <dev/twa/tw_osl_includes.h>
48#include <dev/twa/tw_cl_fwif.h>
49#include <dev/twa/tw_cl_ioctl.h>
50#include <dev/twa/tw_osl_ioctl.h>
51
52#ifdef TW_OSL_DEBUG
53TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
54TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
55#endif /* TW_OSL_DEBUG */
56
57MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
58
59
60static d_open_t twa_open;
61static d_close_t twa_close;
62static d_ioctl_t twa_ioctl;
63
64static struct cdevsw twa_cdevsw = {
65 .d_version = D_VERSION,
66 .d_open = twa_open,
67 .d_close = twa_close,
68 .d_ioctl = twa_ioctl,
69 .d_name = "twa",
70};
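/*
 * Descriptive note (added): these entry points back the per-controller
 * control node (/dev/twaX) that twa_attach() creates with make_dev()
 * further below; twa_ioctl() is the path management tools use for
 * firmware passthrough and bus-rescan requests.
 */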
71
72static devclass_t twa_devclass;
73
74
75/*
76 * Function name: twa_open
77 * Description: Called when the controller is opened.
78 * Simply marks the controller as open.
79 *
80 * Input: dev -- control device corresponding to the ctlr
81 * flags -- mode of open
82 * fmt -- device type (character/block etc.)
83 * proc -- current process
84 * Output: None
85 * Return value: 0 -- success
86 * non-zero-- failure
87 */
88static TW_INT32
89twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
90{
91 TW_INT32 unit = minor(dev);
92 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
93
94 tw_osli_dbg_dprintf(5, sc, "entered");
95 sc->state |= TW_OSLI_CTLR_STATE_OPEN;
96 return(0);
97}
98
99
100
101/*
102 * Function name: twa_close
103 * Description: Called when the controller is closed.
104 * Simply marks the controller as not open.
105 *
106 * Input: dev -- control device corresponding to the ctlr
107 * flags -- mode of corresponding open
108 * fmt -- device type (character/block etc.)
109 * proc -- current process
110 * Output: None
111 * Return value: 0 -- success
112 * non-zero-- failure
113 */
114static TW_INT32
115twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, d_thread_t *proc)
116{
117 TW_INT32 unit = minor(dev);
118 struct twa_softc *sc = devclass_get_softc(twa_devclass, unit);
119
120 tw_osli_dbg_dprintf(5, sc, "entered");
121 sc->state &= ~TW_OSLI_CTLR_STATE_OPEN;
122 return(0);
123}
124
125
126
127/*
128 * Function name: twa_ioctl
129 * Description: Called when an ioctl is posted to the controller.
130 * Handles any OS Layer specific cmds, passes the rest
131 * on to the Common Layer.
132 *
133 * Input: dev -- control device corresponding to the ctlr
134 * cmd -- ioctl cmd
135 * buf -- ptr to buffer in kernel memory, which is
136 * a copy of the input buffer in user-space
137 * flags -- mode of corresponding open
138 * proc -- current process
139 * Output: buf -- ptr to buffer in kernel memory, which will
140 * be copied to the output buffer in user-space
141 * Return value: 0 -- success
142 * non-zero-- failure
143 */
144static TW_INT32
145twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, d_thread_t *proc)
146{
147 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
148 TW_INT32 error;
149
150 tw_osli_dbg_dprintf(5, sc, "entered");
151
152 switch (cmd) {
153 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
154 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
155 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
156 break;
157
158 case TW_OSL_IOCTL_SCAN_BUS:
159 /* Request CAM for a bus scan. */
160 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
161 error = tw_osli_request_bus_scan(sc);
162 break;
163
164 default:
165 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
166 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
167 break;
168 }
169 return(error);
170}
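For orientation, a minimal userland sketch (not part of the driver) of how a management tool could reach twa_ioctl() through the control device. The /dev/twa0 path, the argument-less form of the scan-bus ioctl, and the direct inclusion of <dev/twa/tw_osl_ioctl.h> from userland are assumptions for illustration only:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <dev/twa/tw_osl_ioctl.h>	/* TW_OSL_IOCTL_SCAN_BUS (assumed usable here) */

int
main(void)
{
	int fd;

	/* Control node created by make_dev() in twa_attach() for unit 0. */
	if ((fd = open("/dev/twa0", O_RDWR)) == -1) {
		perror("open(/dev/twa0)");
		return (1);
	}
	/* Ask the OS Layer to request a CAM bus rescan. */
	if (ioctl(fd, TW_OSL_IOCTL_SCAN_BUS) == -1)
		perror("TW_OSL_IOCTL_SCAN_BUS");
	close(fd);
	return (0);
}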
171
172
173
174static TW_INT32 twa_probe(device_t dev);
175static TW_INT32 twa_attach(device_t dev);
176static TW_INT32 twa_detach(device_t dev);
177static TW_INT32 twa_shutdown(device_t dev);
178static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
179#ifdef TW_OSLI_DEFERRED_INTR_USED
180static int twa_pci_intr_fast(TW_VOID *arg);
181static TW_VOID twa_deferred_intr(TW_VOID *context, TW_INT32 pending);
182#else
183static TW_VOID twa_pci_intr(TW_VOID *arg);
184#endif /* TW_OSLI_DEFERRED_INTR_USED */
185
186static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
187static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
188
189static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
190 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
191static TW_VOID twa_map_load_callback(TW_VOID *arg,
192 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
193
194
195static device_method_t twa_methods[] = {
196 /* Device interface */
197 DEVMETHOD(device_probe, twa_probe),
198 DEVMETHOD(device_attach, twa_attach),
199 DEVMETHOD(device_detach, twa_detach),
200 DEVMETHOD(device_shutdown, twa_shutdown),
201
202 DEVMETHOD(bus_print_child, bus_generic_print_child),
203 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
204 {0, 0}
205};
206
207static driver_t twa_pci_driver = {
208 "twa",
209 twa_methods,
210 sizeof(struct twa_softc)
211};
212
213DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
214MODULE_DEPEND(twa, cam, 1, 1, 1);
215MODULE_DEPEND(twa, pci, 1, 1, 1);
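/*
 * Descriptive note (added): DRIVER_MODULE() registers this driver with the
 * PCI bus under the "twa" devclass; the MODULE_DEPEND() lines declare
 * dependencies so the kernel linker ensures the cam and pci modules are
 * present before twa is loaded.
 */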
216
217
218/*
219 * Function name: twa_probe
220 * Description: Called at driver load time. Claims 9000 ctlrs.
221 *
222 * Input: dev -- bus device corresponding to the ctlr
223 * Output: None
224 * Return value: <= 0 -- success
225 * > 0 -- failure
226 */
227static TW_INT32
228twa_probe(device_t dev)
229{
230 static TW_UINT8 first_ctlr = 1;
231
232 tw_osli_dbg_printf(3, "entered");
233
234 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
235 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
236 /* Print the driver version only once. */
237 if (first_ctlr) {
238 printf("3ware device driver for 9000 series storage "
239 "controllers, version: %s\n",
240 TW_OSL_DRIVER_VERSION_STRING);
241 first_ctlr = 0;
242 }
243 return(0);
244 }
245 return(ENXIO);
246}
247
248
249
250/*
251 * Function name: twa_attach
252 * Description: Allocates pci resources; updates sc; adds a node to the
253 * sysctl tree to expose the driver version; makes calls
254 * (to the Common Layer) to initialize ctlr, and to
255 * attach to CAM.
256 *
257 * Input: dev -- bus device corresponding to the ctlr
258 * Output: None
259 * Return value: 0 -- success
260 * non-zero-- failure
261 */
262static TW_INT32
263twa_attach(device_t dev)
264{
265 struct twa_softc *sc = device_get_softc(dev);
266 TW_UINT32 command;
267 TW_INT32 bar_num;
268 TW_INT32 bar0_offset;
269 TW_INT32 bar_size;
270 TW_INT32 error;
271
272 tw_osli_dbg_dprintf(3, sc, "entered");
273
274 sc->ctlr_handle.osl_ctlr_ctxt = sc;
275
276 /* Initialize the softc structure. */
277 sc->bus_dev = dev;
278 sc->device_id = pci_get_device(dev);
279
280 /* Initialize the mutexes right here. */
281 sc->io_lock = &(sc->io_lock_handle);
282 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
283 sc->q_lock = &(sc->q_lock_handle);
284 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
285 sc->sim_lock = &(sc->sim_lock_handle);
286 mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
284
285 sysctl_ctx_init(&sc->sysctl_ctxt);
286 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
287 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
288 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
289 if (sc->sysctl_tree == NULL) {
290 tw_osli_printf(sc, "error = %d",
291 TW_CL_SEVERITY_ERROR_STRING,
292 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
293 0x2000,
294 "Cannot add sysctl tree node",
295 ENXIO);
296 return(ENXIO);
297 }
298 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
299 OID_AUTO, "driver_version", CTLFLAG_RD,
300 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
301
302 /* Make sure we are going to be able to talk to this board. */
303 command = pci_read_config(dev, PCIR_COMMAND, 2);
304 if ((command & PCIM_CMD_PORTEN) == 0) {
305 tw_osli_printf(sc, "error = %d",
306 TW_CL_SEVERITY_ERROR_STRING,
307 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
308 0x2001,
309 "Register window not available",
310 ENXIO);
311 tw_osli_free_resources(sc);
312 return(ENXIO);
313 }
314
315 /* Force the busmaster enable bit on, in case the BIOS forgot. */
316 command |= PCIM_CMD_BUSMASTEREN;
317 pci_write_config(dev, PCIR_COMMAND, command, 2);
318
319 /* Allocate the PCI register window. */
320 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
321 &bar_num, &bar0_offset, &bar_size))) {
322 tw_osli_printf(sc, "error = %d",
323 TW_CL_SEVERITY_ERROR_STRING,
324 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
325 0x201F,
326 "Can't get PCI BAR info",
327 error);
328 tw_osli_free_resources(sc);
329 return(error);
330 }
331 sc->reg_res_id = PCIR_BARS + bar0_offset;
332 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
333 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
334 == NULL) {
335 tw_osli_printf(sc, "error = %d",
336 TW_CL_SEVERITY_ERROR_STRING,
337 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
338 0x2002,
339 "Can't allocate register window",
340 ENXIO);
341 tw_osli_free_resources(sc);
342 return(ENXIO);
343 }
344 sc->bus_tag = rman_get_bustag(sc->reg_res);
345 sc->bus_handle = rman_get_bushandle(sc->reg_res);
346
347 /* Allocate and register our interrupt. */
348 sc->irq_res_id = 0;
349 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
350 &(sc->irq_res_id), 0, ~0, 1,
351 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
352 tw_osli_printf(sc, "error = %d",
353 TW_CL_SEVERITY_ERROR_STRING,
354 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
355 0x2003,
356 "Can't allocate interrupt",
357 ENXIO);
358 tw_osli_free_resources(sc);
359 return(ENXIO);
360 }
361 if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
287
288 sysctl_ctx_init(&sc->sysctl_ctxt);
289 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
290 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
291 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
292 if (sc->sysctl_tree == NULL) {
293 tw_osli_printf(sc, "error = %d",
294 TW_CL_SEVERITY_ERROR_STRING,
295 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
296 0x2000,
297 "Cannot add sysctl tree node",
298 ENXIO);
299 return(ENXIO);
300 }
301 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
302 OID_AUTO, "driver_version", CTLFLAG_RD,
303 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
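	/*
	 * Note (added): the node created above lands under hw.<nameunit>,
	 * so for unit 0 the version string is readable as
	 * hw.twa0.driver_version (e.g. via sysctlbyname() from userland);
	 * "twa0" is an assumed unit name here.
	 */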
304
305 /* Make sure we are going to be able to talk to this board. */
306 command = pci_read_config(dev, PCIR_COMMAND, 2);
307 if ((command & PCIM_CMD_PORTEN) == 0) {
308 tw_osli_printf(sc, "error = %d",
309 TW_CL_SEVERITY_ERROR_STRING,
310 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
311 0x2001,
312 "Register window not available",
313 ENXIO);
314 tw_osli_free_resources(sc);
315 return(ENXIO);
316 }
317
318 /* Force the busmaster enable bit on, in case the BIOS forgot. */
319 command |= PCIM_CMD_BUSMASTEREN;
320 pci_write_config(dev, PCIR_COMMAND, command, 2);
321
322 /* Allocate the PCI register window. */
323 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
324 &bar_num, &bar0_offset, &bar_size))) {
325 tw_osli_printf(sc, "error = %d",
326 TW_CL_SEVERITY_ERROR_STRING,
327 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
328 0x201F,
329 "Can't get PCI BAR info",
330 error);
331 tw_osli_free_resources(sc);
332 return(error);
333 }
334 sc->reg_res_id = PCIR_BARS + bar0_offset;
335 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
336 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
337 == NULL) {
338 tw_osli_printf(sc, "error = %d",
339 TW_CL_SEVERITY_ERROR_STRING,
340 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
341 0x2002,
342 "Can't allocate register window",
343 ENXIO);
344 tw_osli_free_resources(sc);
345 return(ENXIO);
346 }
347 sc->bus_tag = rman_get_bustag(sc->reg_res);
348 sc->bus_handle = rman_get_bushandle(sc->reg_res);
349
350 /* Allocate and register our interrupt. */
351 sc->irq_res_id = 0;
352 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
353 &(sc->irq_res_id), 0, ~0, 1,
354 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
355 tw_osli_printf(sc, "error = %d",
356 TW_CL_SEVERITY_ERROR_STRING,
357 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
358 0x2003,
359 "Can't allocate interrupt",
360 ENXIO);
361 tw_osli_free_resources(sc);
362 return(ENXIO);
363 }
364 if ((error = bus_setup_intr(sc->bus_dev, sc->irq_res,
362 INTR_TYPE_CAM,
363#ifdef TW_OSLI_DEFERRED_INTR_USED
365#ifdef TW_OSLI_DEFERRED_INTR_USED
366 INTR_TYPE_CAM | INTR_FAST,
364 twa_pci_intr_fast, NULL,
365#else
367 twa_pci_intr_fast, NULL,
368#else
369 INTR_TYPE_CAM | INTR_MPSAFE,
366 NULL, twa_pci_intr,
367#endif
368 sc, &sc->intr_handle))) {
369 tw_osli_printf(sc, "error = %d",
370 TW_CL_SEVERITY_ERROR_STRING,
371 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
372 0x2004,
373 "Can't set up interrupt",
374 error);
375 tw_osli_free_resources(sc);
376 return(error);
377 }
378
379#ifdef TW_OSLI_DEFERRED_INTR_USED
380 TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
381#endif /* TW_OSLI_DEFERRED_INTR_USED */
382
383 if ((error = tw_osli_alloc_mem(sc))) {
384 tw_osli_printf(sc, "error = %d",
385 TW_CL_SEVERITY_ERROR_STRING,
386 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
387 0x2005,
388 "Memory allocation failure",
389 error);
390 tw_osli_free_resources(sc);
391 return(error);
392 }
393
394 /* Initialize the Common Layer for this controller. */
395 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
396 TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
397 sc->non_dma_mem, sc->dma_mem,
398 sc->dma_mem_phys
399 ))) {
400 tw_osli_printf(sc, "error = %d",
401 TW_CL_SEVERITY_ERROR_STRING,
402 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
403 0x2006,
404 "Failed to initialize Common Layer/controller",
405 error);
406 tw_osli_free_resources(sc);
407 return(error);
408 }
409
410 /* Create the control device. */
411 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
412 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
413 "twa%d", device_get_unit(sc->bus_dev));
414 sc->ctrl_dev->si_drv1 = sc;
415
416 if ((error = tw_osli_cam_attach(sc))) {
417 tw_osli_free_resources(sc);
418 tw_osli_printf(sc, "error = %d",
419 TW_CL_SEVERITY_ERROR_STRING,
420 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
421 0x2007,
422 "Failed to initialize CAM",
423 error);
424 return(error);
425 }
426
427 return(0);
428}
429
430
431
432/*
433 * Function name: tw_osli_alloc_mem
434 * Description: Allocates memory needed both by CL and OSL.
435 *
436 * Input: sc -- OSL internal controller context
437 * Output: None
438 * Return value: 0 -- success
439 * non-zero-- failure
440 */
441static TW_INT32
442tw_osli_alloc_mem(struct twa_softc *sc)
443{
444 struct tw_osli_req_context *req;
445 TW_UINT32 max_sg_elements;
446 TW_UINT32 non_dma_mem_size;
447 TW_UINT32 dma_mem_size;
448 TW_INT32 error;
449 TW_INT32 i;
450
451 tw_osli_dbg_dprintf(3, sc, "entered");
452
453 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
454 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
455#ifdef TW_OSLI_DEFERRED_INTR_USED
456 sc->flags |= TW_CL_DEFERRED_INTR_USED;
457#endif /* TW_OSLI_DEFERRED_INTR_USED */
458
459 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
460 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
461
462 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
463 sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
464 &(sc->alignment), &(sc->sg_size_factor),
465 &non_dma_mem_size, &dma_mem_size
466 ))) {
467 tw_osli_printf(sc, "error = %d",
468 TW_CL_SEVERITY_ERROR_STRING,
469 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
470 0x2008,
471 "Can't get Common Layer's memory requirements",
472 error);
473 return(error);
474 }
475
476 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
477 M_WAITOK)) == NULL) {
478 tw_osli_printf(sc, "error = %d",
479 TW_CL_SEVERITY_ERROR_STRING,
480 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
481 0x2009,
482 "Can't allocate non-dma memory",
483 ENOMEM);
484 return(ENOMEM);
485 }
486
487 /* Create the parent dma tag. */
488 if (bus_dma_tag_create(NULL, /* parent */
489 sc->alignment, /* alignment */
490 0, /* boundary */
491 BUS_SPACE_MAXADDR, /* lowaddr */
492 BUS_SPACE_MAXADDR, /* highaddr */
493 NULL, NULL, /* filter, filterarg */
494 TW_CL_MAX_IO_SIZE, /* maxsize */
495 max_sg_elements, /* nsegments */
496 TW_CL_MAX_IO_SIZE, /* maxsegsize */
497 0, /* flags */
498 NULL, /* lockfunc */
499 NULL, /* lockfuncarg */
500 &sc->parent_tag /* tag */)) {
501 tw_osli_printf(sc, "error = %d",
502 TW_CL_SEVERITY_ERROR_STRING,
503 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
504 0x200A,
505 "Can't allocate parent DMA tag",
506 ENOMEM);
507 return(ENOMEM);
508 }
509
510 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
511 if (bus_dma_tag_create(sc->parent_tag, /* parent */
512 sc->alignment, /* alignment */
513 0, /* boundary */
514 BUS_SPACE_MAXADDR, /* lowaddr */
515 BUS_SPACE_MAXADDR, /* highaddr */
516 NULL, NULL, /* filter, filterarg */
517 dma_mem_size, /* maxsize */
518 1, /* nsegments */
519 BUS_SPACE_MAXSIZE, /* maxsegsize */
520 0, /* flags */
521 NULL, /* lockfunc */
522 NULL, /* lockfuncarg */
523 &sc->cmd_tag /* tag */)) {
524 tw_osli_printf(sc, "error = %d",
525 TW_CL_SEVERITY_ERROR_STRING,
526 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
527 0x200B,
528 "Can't allocate DMA tag for Common Layer's "
529 "DMA'able memory",
530 ENOMEM);
531 return(ENOMEM);
532 }
533
534 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
535 BUS_DMA_NOWAIT, &sc->cmd_map)) {
536 /* Try a second time. */
537 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
538 BUS_DMA_NOWAIT, &sc->cmd_map)) {
539 tw_osli_printf(sc, "error = %d",
540 TW_CL_SEVERITY_ERROR_STRING,
541 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
542 0x200C,
543 "Can't allocate DMA'able memory for the"
544 "Common Layer",
545 ENOMEM);
546 return(ENOMEM);
547 }
548 }
549
550 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
551 dma_mem_size, twa_map_load_callback,
552 &sc->dma_mem_phys, 0);
553
554 /*
555 * Create a dma tag for data buffers; size will be the maximum
556 * possible I/O size (128kB).
557 */
558 if (bus_dma_tag_create(sc->parent_tag, /* parent */
559 sc->alignment, /* alignment */
560 0, /* boundary */
561 BUS_SPACE_MAXADDR, /* lowaddr */
562 BUS_SPACE_MAXADDR, /* highaddr */
563 NULL, NULL, /* filter, filterarg */
564 TW_CL_MAX_IO_SIZE, /* maxsize */
565 max_sg_elements, /* nsegments */
566 TW_CL_MAX_IO_SIZE, /* maxsegsize */
567 BUS_DMA_ALLOCNOW, /* flags */
568 twa_busdma_lock, /* lockfunc */
569 sc->io_lock, /* lockfuncarg */
570 &sc->dma_tag /* tag */)) {
571 tw_osli_printf(sc, "error = %d",
572 TW_CL_SEVERITY_ERROR_STRING,
573 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
574 0x200F,
575 "Can't allocate DMA tag for data buffers",
576 ENOMEM);
577 return(ENOMEM);
578 }
579
580 /*
581 * Create a dma tag for ioctl data buffers; size will be the maximum
582 * possible I/O size (128kB).
583 */
584 if (bus_dma_tag_create(sc->parent_tag, /* parent */
585 sc->alignment, /* alignment */
586 0, /* boundary */
587 BUS_SPACE_MAXADDR, /* lowaddr */
588 BUS_SPACE_MAXADDR, /* highaddr */
589 NULL, NULL, /* filter, filterarg */
590 TW_CL_MAX_IO_SIZE, /* maxsize */
591 max_sg_elements, /* nsegments */
592 TW_CL_MAX_IO_SIZE, /* maxsegsize */
593 BUS_DMA_ALLOCNOW, /* flags */
594 twa_busdma_lock, /* lockfunc */
595 sc->io_lock, /* lockfuncarg */
596 &sc->ioctl_tag /* tag */)) {
597 tw_osli_printf(sc, "error = %d",
598 TW_CL_SEVERITY_ERROR_STRING,
599 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
600 0x2010,
601 "Can't allocate DMA tag for ioctl data buffers",
602 ENOMEM);
603 return(ENOMEM);
604 }
605
606 /* Create just one map for all ioctl request data buffers. */
607 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
608 tw_osli_printf(sc, "error = %d",
609 TW_CL_SEVERITY_ERROR_STRING,
610 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
611 0x2011,
612 "Can't create ioctl map",
613 ENOMEM);
614 return(ENOMEM);
615 }
616
617
618 /* Initialize request queues. */
619 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
620 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
621
622 if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
623 malloc((sizeof(struct tw_osli_req_context) *
624 TW_OSLI_MAX_NUM_IOS),
625 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
626 tw_osli_printf(sc, "error = %d",
627 TW_CL_SEVERITY_ERROR_STRING,
628 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
629 0x2012,
630 "Failed to allocate request packets",
631 ENOMEM);
632 return(ENOMEM);
633 }
634 bzero(sc->req_ctxt_buf,
635 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);
636
637 for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
638 req = &(sc->req_ctxt_buf[i]);
639 req->ctlr = sc;
640 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
641 tw_osli_printf(sc, "request # = %d, error = %d",
642 TW_CL_SEVERITY_ERROR_STRING,
643 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
644 0x2013,
645 "Can't create dma map",
646 i, ENOMEM);
647 return(ENOMEM);
648 }
649
650 /* Insert request into the free queue. */
651 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
652 }
653
654 return(0);
655}
656
657
658
659/*
660 * Function name: tw_osli_free_resources
661 * Description: Performs clean-up at the time of going down.
662 *
663 * Input: sc -- ptr to OSL internal ctlr context
664 * Output: None
665 * Return value: None
666 */
667static TW_VOID
668tw_osli_free_resources(struct twa_softc *sc)
669{
670 struct tw_osli_req_context *req;
671 TW_INT32 error = 0;
672
673 tw_osli_dbg_dprintf(3, sc, "entered");
674
675 /* Detach from CAM */
676 tw_osli_cam_detach(sc);
677
678 if (sc->req_ctxt_buf)
679 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
680 NULL)
681 if ((error = bus_dmamap_destroy(sc->dma_tag,
682 req->dma_map)))
683 tw_osli_dbg_dprintf(1, sc,
684 "dmamap_destroy(dma) returned %d",
685 error);
686
687 if ((sc->ioctl_tag) && (sc->ioctl_map))
688 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
689 tw_osli_dbg_dprintf(1, sc,
690 "dmamap_destroy(ioctl) returned %d", error);
691
692 /* Free all memory allocated so far. */
693 if (sc->req_ctxt_buf)
694 free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);
695
696 if (sc->non_dma_mem)
697 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
698
699 if (sc->dma_mem) {
700 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
701 bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
702 sc->cmd_map);
703 }
704 if (sc->cmd_tag)
705 if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
706 tw_osli_dbg_dprintf(1, sc,
707 "dma_tag_destroy(cmd) returned %d", error);
708
709 if (sc->dma_tag)
710 if ((error = bus_dma_tag_destroy(sc->dma_tag)))
711 tw_osli_dbg_dprintf(1, sc,
712 "dma_tag_destroy(dma) returned %d", error);
713
714 if (sc->ioctl_tag)
715 if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
716 tw_osli_dbg_dprintf(1, sc,
717 "dma_tag_destroy(ioctl) returned %d", error);
718
719 if (sc->parent_tag)
720 if ((error = bus_dma_tag_destroy(sc->parent_tag)))
721 tw_osli_dbg_dprintf(1, sc,
722 "dma_tag_destroy(parent) returned %d", error);
723
724
725 /* Disconnect the interrupt handler. */
726 if (sc->intr_handle)
727 if ((error = bus_teardown_intr(sc->bus_dev,
728 sc->irq_res, sc->intr_handle)))
729 tw_osli_dbg_dprintf(1, sc,
730 "teardown_intr returned %d", error);
731
732 if (sc->irq_res != NULL)
733 if ((error = bus_release_resource(sc->bus_dev,
734 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
735 tw_osli_dbg_dprintf(1, sc,
736 "release_resource(irq) returned %d", error);
737
738
739 /* Release the register window mapping. */
740 if (sc->reg_res != NULL)
741 if ((error = bus_release_resource(sc->bus_dev,
742 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
743 tw_osli_dbg_dprintf(1, sc,
744 "release_resource(io) returned %d", error);
745
746
747 /* Destroy the control device. */
748 if (sc->ctrl_dev != (struct cdev *)NULL)
749 destroy_dev(sc->ctrl_dev);
750
751 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
752 tw_osli_dbg_dprintf(1, sc,
753 "sysctl_ctx_free returned %d", error);
754
755}
756
757
758
759/*
760 * Function name: twa_detach
761 * Description: Called when the controller is being detached from
762 * the pci bus.
763 *
764 * Input: dev -- bus device corresponding to the ctlr
765 * Output: None
766 * Return value: 0 -- success
767 * non-zero-- failure
768 */
769static TW_INT32
770twa_detach(device_t dev)
771{
772 struct twa_softc *sc = device_get_softc(dev);
773 TW_INT32 error;
774
775 tw_osli_dbg_dprintf(3, sc, "entered");
776
777 error = EBUSY;
778 if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
779 tw_osli_printf(sc, "error = %d",
780 TW_CL_SEVERITY_ERROR_STRING,
781 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
782 0x2014,
783 "Device open",
784 error);
785 goto out;
786 }
787
788 /* Shut the controller down. */
789 if ((error = twa_shutdown(dev)))
790 goto out;
791
792 /* Free all resources associated with this controller. */
793 tw_osli_free_resources(sc);
794 error = 0;
795
796out:
797 return(error);
798}
799
800
801
802/*
803 * Function name: twa_shutdown
804 * Description: Called at unload/shutdown time. Lets the controller
805 * know that we are going down.
806 *
807 * Input: dev -- bus device corresponding to the ctlr
808 * Output: None
809 * Return value: 0 -- success
810 * non-zero-- failure
811 */
812static TW_INT32
813twa_shutdown(device_t dev)
814{
815 struct twa_softc *sc = device_get_softc(dev);
816 TW_INT32 error = 0;
817
818 tw_osli_dbg_dprintf(3, sc, "entered");
819
820 /* Disconnect from the controller. */
821 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
822 tw_osli_printf(sc, "error = %d",
823 TW_CL_SEVERITY_ERROR_STRING,
824 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
825 0x2015,
826 "Failed to shutdown Common Layer/controller",
827 error);
828 }
829 return(error);
830}
831
832
833
834/*
835 * Function name: twa_busdma_lock
836 * Description: Function to provide synchronization during busdma_swi.
837 *
838 * Input: lock_arg -- lock mutex sent as argument
839 * op -- operation (lock/unlock) expected of the function
840 * Output: None
841 * Return value: None
842 */
843TW_VOID
844twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
845{
846 struct mtx *lock;
847
848 lock = (struct mtx *)lock_arg;
849 switch (op) {
850 case BUS_DMA_LOCK:
851 mtx_lock_spin(lock);
852 break;
853
854 case BUS_DMA_UNLOCK:
855 mtx_unlock_spin(lock);
856 break;
857
858 default:
859 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
860 }
861}
862
863
864#ifdef TW_OSLI_DEFERRED_INTR_USED
865/*
866 * Function name: twa_pci_intr_fast
867 * Description: Interrupt filter handler. Wrapper around tw_cl_interrupt().
868 *
869 * Input: arg -- ptr to OSL internal ctlr context
870 * Output: None
 871 * Return value: FILTER_HANDLED or FILTER_STRAY
872 */
873static int
874twa_pci_intr_fast(TW_VOID *arg)
875{
876 struct twa_softc *sc = (struct twa_softc *)arg;
877
878 tw_osli_dbg_dprintf(10, sc, "entered");
879 if (tw_cl_interrupt(&(sc->ctlr_handle))) {
370 NULL, twa_pci_intr,
371#endif
372 sc, &sc->intr_handle))) {
373 tw_osli_printf(sc, "error = %d",
374 TW_CL_SEVERITY_ERROR_STRING,
375 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
376 0x2004,
377 "Can't set up interrupt",
378 error);
379 tw_osli_free_resources(sc);
380 return(error);
381 }
382
383#ifdef TW_OSLI_DEFERRED_INTR_USED
384 TASK_INIT(&sc->deferred_intr_callback, 0, twa_deferred_intr, sc);
385#endif /* TW_OSLI_DEFERRED_INTR_USED */
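	/*
	 * Note (added): two interrupt strategies are selected at compile
	 * time.  With TW_OSLI_DEFERRED_INTR_USED, twa_pci_intr_fast() is
	 * registered as an INTR_FAST filter and defers the real work to
	 * deferred_intr_callback on taskqueue_fast; otherwise twa_pci_intr()
	 * runs as a regular INTR_MPSAFE ithread handler and calls
	 * tw_cl_deferred_interrupt() directly.
	 */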
386
387 if ((error = tw_osli_alloc_mem(sc))) {
388 tw_osli_printf(sc, "error = %d",
389 TW_CL_SEVERITY_ERROR_STRING,
390 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
391 0x2005,
392 "Memory allocation failure",
393 error);
394 tw_osli_free_resources(sc);
395 return(error);
396 }
397
398 /* Initialize the Common Layer for this controller. */
399 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
400 TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
401 sc->non_dma_mem, sc->dma_mem,
402 sc->dma_mem_phys
403 ))) {
404 tw_osli_printf(sc, "error = %d",
405 TW_CL_SEVERITY_ERROR_STRING,
406 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
407 0x2006,
408 "Failed to initialize Common Layer/controller",
409 error);
410 tw_osli_free_resources(sc);
411 return(error);
412 }
413
414 /* Create the control device. */
415 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
416 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
417 "twa%d", device_get_unit(sc->bus_dev));
418 sc->ctrl_dev->si_drv1 = sc;
419
420 if ((error = tw_osli_cam_attach(sc))) {
421 tw_osli_free_resources(sc);
422 tw_osli_printf(sc, "error = %d",
423 TW_CL_SEVERITY_ERROR_STRING,
424 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
425 0x2007,
426 "Failed to initialize CAM",
427 error);
428 return(error);
429 }
430
431 return(0);
432}
433
434
435
436/*
437 * Function name: tw_osli_alloc_mem
438 * Description: Allocates memory needed both by CL and OSL.
439 *
440 * Input: sc -- OSL internal controller context
441 * Output: None
442 * Return value: 0 -- success
443 * non-zero-- failure
444 */
445static TW_INT32
446tw_osli_alloc_mem(struct twa_softc *sc)
447{
448 struct tw_osli_req_context *req;
449 TW_UINT32 max_sg_elements;
450 TW_UINT32 non_dma_mem_size;
451 TW_UINT32 dma_mem_size;
452 TW_INT32 error;
453 TW_INT32 i;
454
455 tw_osli_dbg_dprintf(3, sc, "entered");
456
457 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
458 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
459#ifdef TW_OSLI_DEFERRED_INTR_USED
460 sc->flags |= TW_CL_DEFERRED_INTR_USED;
461#endif /* TW_OSLI_DEFERRED_INTR_USED */
462
463 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
464 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
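	/*
	 * Example (added): on a 64-bit platform such as amd64,
	 * sizeof(bus_addr_t) and sizeof(bus_size_t) are both 8, so
	 * TW_CL_64BIT_ADDRESSES and TW_CL_64BIT_SG_LENGTH are set and the
	 * larger 64-bit SG element limit is used; with a 32-bit bus_addr_t
	 * (typical i386) neither flag is set and the 32-bit limit applies.
	 */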
465
466 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
467 sc->device_id, TW_OSLI_MAX_NUM_IOS, TW_OSLI_MAX_NUM_AENS,
468 &(sc->alignment), &(sc->sg_size_factor),
469 &non_dma_mem_size, &dma_mem_size
470 ))) {
471 tw_osli_printf(sc, "error = %d",
472 TW_CL_SEVERITY_ERROR_STRING,
473 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
474 0x2008,
475 "Can't get Common Layer's memory requirements",
476 error);
477 return(error);
478 }
479
480 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
481 M_WAITOK)) == NULL) {
482 tw_osli_printf(sc, "error = %d",
483 TW_CL_SEVERITY_ERROR_STRING,
484 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
485 0x2009,
486 "Can't allocate non-dma memory",
487 ENOMEM);
488 return(ENOMEM);
489 }
490
491 /* Create the parent dma tag. */
492 if (bus_dma_tag_create(NULL, /* parent */
493 sc->alignment, /* alignment */
494 0, /* boundary */
495 BUS_SPACE_MAXADDR, /* lowaddr */
496 BUS_SPACE_MAXADDR, /* highaddr */
497 NULL, NULL, /* filter, filterarg */
498 TW_CL_MAX_IO_SIZE, /* maxsize */
499 max_sg_elements, /* nsegments */
500 TW_CL_MAX_IO_SIZE, /* maxsegsize */
501 0, /* flags */
502 NULL, /* lockfunc */
503 NULL, /* lockfuncarg */
504 &sc->parent_tag /* tag */)) {
505 tw_osli_printf(sc, "error = %d",
506 TW_CL_SEVERITY_ERROR_STRING,
507 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
508 0x200A,
509 "Can't allocate parent DMA tag",
510 ENOMEM);
511 return(ENOMEM);
512 }
513
514 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
515 if (bus_dma_tag_create(sc->parent_tag, /* parent */
516 sc->alignment, /* alignment */
517 0, /* boundary */
518 BUS_SPACE_MAXADDR, /* lowaddr */
519 BUS_SPACE_MAXADDR, /* highaddr */
520 NULL, NULL, /* filter, filterarg */
521 dma_mem_size, /* maxsize */
522 1, /* nsegments */
523 BUS_SPACE_MAXSIZE, /* maxsegsize */
524 0, /* flags */
525 NULL, /* lockfunc */
526 NULL, /* lockfuncarg */
527 &sc->cmd_tag /* tag */)) {
528 tw_osli_printf(sc, "error = %d",
529 TW_CL_SEVERITY_ERROR_STRING,
530 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
531 0x200B,
532 "Can't allocate DMA tag for Common Layer's "
533 "DMA'able memory",
534 ENOMEM);
535 return(ENOMEM);
536 }
537
538 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
539 BUS_DMA_NOWAIT, &sc->cmd_map)) {
540 /* Try a second time. */
541 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
542 BUS_DMA_NOWAIT, &sc->cmd_map)) {
543 tw_osli_printf(sc, "error = %d",
544 TW_CL_SEVERITY_ERROR_STRING,
545 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
546 0x200C,
547 "Can't allocate DMA'able memory for the"
548 "Common Layer",
549 ENOMEM);
550 return(ENOMEM);
551 }
552 }
553
554 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
555 dma_mem_size, twa_map_load_callback,
556 &sc->dma_mem_phys, 0);
557
558 /*
559 * Create a dma tag for data buffers; size will be the maximum
560 * possible I/O size (128kB).
561 */
562 if (bus_dma_tag_create(sc->parent_tag, /* parent */
563 sc->alignment, /* alignment */
564 0, /* boundary */
565 BUS_SPACE_MAXADDR, /* lowaddr */
566 BUS_SPACE_MAXADDR, /* highaddr */
567 NULL, NULL, /* filter, filterarg */
568 TW_CL_MAX_IO_SIZE, /* maxsize */
569 max_sg_elements, /* nsegments */
570 TW_CL_MAX_IO_SIZE, /* maxsegsize */
571 BUS_DMA_ALLOCNOW, /* flags */
572 twa_busdma_lock, /* lockfunc */
573 sc->io_lock, /* lockfuncarg */
574 &sc->dma_tag /* tag */)) {
575 tw_osli_printf(sc, "error = %d",
576 TW_CL_SEVERITY_ERROR_STRING,
577 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
578 0x200F,
579 "Can't allocate DMA tag for data buffers",
580 ENOMEM);
581 return(ENOMEM);
582 }
583
584 /*
585 * Create a dma tag for ioctl data buffers; size will be the maximum
586 * possible I/O size (128kB).
587 */
588 if (bus_dma_tag_create(sc->parent_tag, /* parent */
589 sc->alignment, /* alignment */
590 0, /* boundary */
591 BUS_SPACE_MAXADDR, /* lowaddr */
592 BUS_SPACE_MAXADDR, /* highaddr */
593 NULL, NULL, /* filter, filterarg */
594 TW_CL_MAX_IO_SIZE, /* maxsize */
595 max_sg_elements, /* nsegments */
596 TW_CL_MAX_IO_SIZE, /* maxsegsize */
597 BUS_DMA_ALLOCNOW, /* flags */
598 twa_busdma_lock, /* lockfunc */
599 sc->io_lock, /* lockfuncarg */
600 &sc->ioctl_tag /* tag */)) {
601 tw_osli_printf(sc, "error = %d",
602 TW_CL_SEVERITY_ERROR_STRING,
603 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
604 0x2010,
605 "Can't allocate DMA tag for ioctl data buffers",
606 ENOMEM);
607 return(ENOMEM);
608 }
609
610 /* Create just one map for all ioctl request data buffers. */
611 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
612 tw_osli_printf(sc, "error = %d",
613 TW_CL_SEVERITY_ERROR_STRING,
614 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
615 0x2011,
616 "Can't create ioctl map",
617 ENOMEM);
618 return(ENOMEM);
619 }
620
621
622 /* Initialize request queues. */
623 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
624 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
625
626 if ((sc->req_ctxt_buf = (struct tw_osli_req_context *)
627 malloc((sizeof(struct tw_osli_req_context) *
628 TW_OSLI_MAX_NUM_IOS),
629 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
630 tw_osli_printf(sc, "error = %d",
631 TW_CL_SEVERITY_ERROR_STRING,
632 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
633 0x2012,
634 "Failed to allocate request packets",
635 ENOMEM);
636 return(ENOMEM);
637 }
638 bzero(sc->req_ctxt_buf,
639 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_IOS);
640
641 for (i = 0; i < TW_OSLI_MAX_NUM_IOS; i++) {
642 req = &(sc->req_ctxt_buf[i]);
643 req->ctlr = sc;
644 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
645 tw_osli_printf(sc, "request # = %d, error = %d",
646 TW_CL_SEVERITY_ERROR_STRING,
647 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
648 0x2013,
649 "Can't create dma map",
650 i, ENOMEM);
651 return(ENOMEM);
652 }
653
654 /* Insert request into the free queue. */
655 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
656 }
657
658 return(0);
659}
660
661
662
663/*
664 * Function name: tw_osli_free_resources
665 * Description: Performs clean-up at the time of going down.
666 *
667 * Input: sc -- ptr to OSL internal ctlr context
668 * Output: None
669 * Return value: None
670 */
671static TW_VOID
672tw_osli_free_resources(struct twa_softc *sc)
673{
674 struct tw_osli_req_context *req;
675 TW_INT32 error = 0;
676
677 tw_osli_dbg_dprintf(3, sc, "entered");
678
679 /* Detach from CAM */
680 tw_osli_cam_detach(sc);
681
682 if (sc->req_ctxt_buf)
683 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
684 NULL)
685 if ((error = bus_dmamap_destroy(sc->dma_tag,
686 req->dma_map)))
687 tw_osli_dbg_dprintf(1, sc,
688 "dmamap_destroy(dma) returned %d",
689 error);
690
691 if ((sc->ioctl_tag) && (sc->ioctl_map))
692 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
693 tw_osli_dbg_dprintf(1, sc,
694 "dmamap_destroy(ioctl) returned %d", error);
695
696 /* Free all memory allocated so far. */
697 if (sc->req_ctxt_buf)
698 free(sc->req_ctxt_buf, TW_OSLI_MALLOC_CLASS);
699
700 if (sc->non_dma_mem)
701 free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);
702
703 if (sc->dma_mem) {
704 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
705 bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
706 sc->cmd_map);
707 }
708 if (sc->cmd_tag)
709 if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
710 tw_osli_dbg_dprintf(1, sc,
711 "dma_tag_destroy(cmd) returned %d", error);
712
713 if (sc->dma_tag)
714 if ((error = bus_dma_tag_destroy(sc->dma_tag)))
715 tw_osli_dbg_dprintf(1, sc,
716 "dma_tag_destroy(dma) returned %d", error);
717
718 if (sc->ioctl_tag)
719 if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
720 tw_osli_dbg_dprintf(1, sc,
721 "dma_tag_destroy(ioctl) returned %d", error);
722
723 if (sc->parent_tag)
724 if ((error = bus_dma_tag_destroy(sc->parent_tag)))
725 tw_osli_dbg_dprintf(1, sc,
726 "dma_tag_destroy(parent) returned %d", error);
727
728
729 /* Disconnect the interrupt handler. */
730 if (sc->intr_handle)
731 if ((error = bus_teardown_intr(sc->bus_dev,
732 sc->irq_res, sc->intr_handle)))
733 tw_osli_dbg_dprintf(1, sc,
734 "teardown_intr returned %d", error);
735
736 if (sc->irq_res != NULL)
737 if ((error = bus_release_resource(sc->bus_dev,
738 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
739 tw_osli_dbg_dprintf(1, sc,
740 "release_resource(irq) returned %d", error);
741
742
743 /* Release the register window mapping. */
744 if (sc->reg_res != NULL)
745 if ((error = bus_release_resource(sc->bus_dev,
746 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
747 tw_osli_dbg_dprintf(1, sc,
748 "release_resource(io) returned %d", error);
749
750
751 /* Destroy the control device. */
752 if (sc->ctrl_dev != (struct cdev *)NULL)
753 destroy_dev(sc->ctrl_dev);
754
755 if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
756 tw_osli_dbg_dprintf(1, sc,
757 "sysctl_ctx_free returned %d", error);
758
759}
760
761
762
763/*
764 * Function name: twa_detach
765 * Description: Called when the controller is being detached from
766 * the pci bus.
767 *
768 * Input: dev -- bus device corresponding to the ctlr
769 * Output: None
770 * Return value: 0 -- success
771 * non-zero-- failure
772 */
773static TW_INT32
774twa_detach(device_t dev)
775{
776 struct twa_softc *sc = device_get_softc(dev);
777 TW_INT32 error;
778
779 tw_osli_dbg_dprintf(3, sc, "entered");
780
781 error = EBUSY;
782 if (sc->state & TW_OSLI_CTLR_STATE_OPEN) {
783 tw_osli_printf(sc, "error = %d",
784 TW_CL_SEVERITY_ERROR_STRING,
785 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
786 0x2014,
787 "Device open",
788 error);
789 goto out;
790 }
791
792 /* Shut the controller down. */
793 if ((error = twa_shutdown(dev)))
794 goto out;
795
796 /* Free all resources associated with this controller. */
797 tw_osli_free_resources(sc);
798 error = 0;
799
800out:
801 return(error);
802}
803
804
805
806/*
807 * Function name: twa_shutdown
808 * Description: Called at unload/shutdown time. Lets the controller
809 * know that we are going down.
810 *
811 * Input: dev -- bus device corresponding to the ctlr
812 * Output: None
813 * Return value: 0 -- success
814 * non-zero-- failure
815 */
816static TW_INT32
817twa_shutdown(device_t dev)
818{
819 struct twa_softc *sc = device_get_softc(dev);
820 TW_INT32 error = 0;
821
822 tw_osli_dbg_dprintf(3, sc, "entered");
823
824 /* Disconnect from the controller. */
825 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
826 tw_osli_printf(sc, "error = %d",
827 TW_CL_SEVERITY_ERROR_STRING,
828 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
829 0x2015,
830 "Failed to shutdown Common Layer/controller",
831 error);
832 }
833 return(error);
834}
835
836
837
838/*
839 * Function name: twa_busdma_lock
840 * Description: Function to provide synchronization during busdma_swi.
841 *
842 * Input: lock_arg -- lock mutex sent as argument
843 * op -- operation (lock/unlock) expected of the function
844 * Output: None
845 * Return value: None
846 */
847TW_VOID
848twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
849{
850 struct mtx *lock;
851
852 lock = (struct mtx *)lock_arg;
853 switch (op) {
854 case BUS_DMA_LOCK:
855 mtx_lock_spin(lock);
856 break;
857
858 case BUS_DMA_UNLOCK:
859 mtx_unlock_spin(lock);
860 break;
861
862 default:
863 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
864 }
865}
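/*
 * Note (added): in this driver lock_arg is always sc->io_lock, the MTX_SPIN
 * mutex passed as lockfuncarg when the data and ioctl DMA tags are created
 * in tw_osli_alloc_mem(), which is why the spin-mutex lock/unlock variants
 * are used above.
 */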
866
867
868#ifdef TW_OSLI_DEFERRED_INTR_USED
869/*
870 * Function name: twa_pci_intr_fast
871 * Description: Interrupt filter handler. Wrapper around tw_cl_interrupt().
872 *
873 * Input: arg -- ptr to OSL internal ctlr context
874 * Output: None
 875 * Return value: FILTER_HANDLED or FILTER_STRAY
876 */
877static int
878twa_pci_intr_fast(TW_VOID *arg)
879{
880 struct twa_softc *sc = (struct twa_softc *)arg;
881
882 tw_osli_dbg_dprintf(10, sc, "entered");
883 if (tw_cl_interrupt(&(sc->ctlr_handle))) {
880 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
884 taskqueue_enqueue_fast(taskqueue_fast,
885 &(sc->deferred_intr_callback));
881 return(FILTER_HANDLED);
882 }
883 return(FILTER_STRAY);
884}
885#else
886/*
887 * Function name: twa_pci_intr
888 * Description: Interrupt handler. Wrapper around tw_cl_interrupt().
889 *
890 * Input: arg -- ptr to OSL internal ctlr context
891 * Output: None
892 * Return value: None
893 */
894static TW_VOID
895twa_pci_intr(TW_VOID *arg)
896{
897 struct twa_softc *sc = (struct twa_softc *)arg;
898
899 tw_osli_dbg_dprintf(10, sc, "entered");
900 if (tw_cl_interrupt(&(sc->ctlr_handle)))
901 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
902}
903#endif
904
905#ifdef TW_OSLI_DEFERRED_INTR_USED
906
907/*
908 * Function name: twa_deferred_intr
909 * Description: Deferred interrupt handler.
910 *
911 * Input: context -- ptr to OSL internal ctlr context
912 * pending -- not used
913 * Output: None
914 * Return value: None
915 */
916static TW_VOID
917twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
918{
919 struct twa_softc *sc = (struct twa_softc *)context;
920
921 tw_osli_dbg_dprintf(10, sc, "entered");
922
923 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
924}
925
926#endif /* TW_OSLI_DEFERRED_INTR_USED */
927
928
929
930/*
931 * Function name: tw_osli_fw_passthru
932 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
933 *
934 * Input: sc -- ptr to OSL internal ctlr context
935 * buf -- ptr to ioctl pkt understood by CL
936 * Output: None
937 * Return value: 0 -- success
938 * non-zero-- failure
939 */
940TW_INT32
941tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
942{
943 struct tw_osli_req_context *req;
944 struct tw_osli_ioctl_no_data_buf *user_buf =
945 (struct tw_osli_ioctl_no_data_buf *)buf;
946 TW_TIME end_time;
947 TW_UINT32 timeout = 60;
948 TW_UINT32 data_buf_size_adjusted;
949 struct tw_cl_req_packet *req_pkt;
950 struct tw_cl_passthru_req_packet *pt_req;
951 TW_INT32 error;
952
953 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
954
955 if ((req = tw_osli_get_request(sc)) == NULL)
956 return(EBUSY);
957
958 req->req_handle.osl_req_ctxt = req;
959 req->orig_req = buf;
960 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
961
962 req_pkt = &(req->req_pkt);
963 req_pkt->status = 0;
964 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
965 /* Let the Common Layer retry the request on cmd queue full. */
966 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
967
968 pt_req = &(req_pkt->gen_req_pkt.pt_req);
969 /*
970 * Make sure that the data buffer sent to firmware is a
971 * 512 byte multiple in size.
972 */
973 data_buf_size_adjusted =
974 (user_buf->driver_pkt.buffer_length +
975 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
976 if ((req->length = data_buf_size_adjusted)) {
977 if ((req->data = malloc(data_buf_size_adjusted,
978 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
979 error = ENOMEM;
980 tw_osli_printf(sc, "error = %d",
981 TW_CL_SEVERITY_ERROR_STRING,
982 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
983 0x2016,
984 "Could not alloc mem for "
985 "fw_passthru data_buf",
986 error);
987 goto fw_passthru_err;
988 }
989 /* Copy the payload. */
990 if ((error = copyin((TW_VOID *)(user_buf->pdata),
991 req->data,
992 user_buf->driver_pkt.buffer_length)) != 0) {
993 tw_osli_printf(sc, "error = %d",
994 TW_CL_SEVERITY_ERROR_STRING,
995 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
996 0x2017,
997 "Could not copyin fw_passthru data_buf",
998 error);
999 goto fw_passthru_err;
1000 }
1001 pt_req->sgl_entries = 1; /* will be updated during mapping */
1002 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1003 TW_OSLI_REQ_FLAGS_DATA_OUT);
1004 } else
1005 pt_req->sgl_entries = 0; /* no payload */
1006
1007 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1008 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1009
1010 if ((error = tw_osli_map_request(req)))
1011 goto fw_passthru_err;
1012
1013 end_time = tw_osl_get_local_time() + timeout;
1014 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1015 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1016
1017 error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);
1018
1019 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1020 error = 0;
1021 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1022
1023 if (! error) {
1024 if (((error = req->error_code)) ||
1025 ((error = (req->state !=
1026 TW_OSLI_REQ_STATE_COMPLETE))) ||
1027 ((error = req_pkt->status)))
1028 goto fw_passthru_err;
1029 break;
1030 }
1031
1032 if (req_pkt->status) {
1033 error = req_pkt->status;
1034 goto fw_passthru_err;
1035 }
1036
1037 if (error == EWOULDBLOCK) {
1038 /* Time out! */
1039 tw_osli_printf(sc, "request = %p",
1040 TW_CL_SEVERITY_ERROR_STRING,
1041 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1042 0x2018,
1043 "Passthru request timed out!",
1044 req);
1045 /*
1046 * Should I check here if the timeout happened
1047 * because of yet another reset, and not do a
1048 * second reset?
1049 */
1050 tw_cl_reset_ctlr(&sc->ctlr_handle);
1051 /*
1052 * Don't touch req after a reset. It (and any
1053 * associated data) will already have been
886 return(FILTER_HANDLED);
887 }
888 return(FILTER_STRAY);
889}
890#else
891/*
892 * Function name: twa_pci_intr
893 * Description: Interrupt handler. Wrapper around tw_cl_interrupt().
894 *
895 * Input: arg -- ptr to OSL internal ctlr context
896 * Output: None
897 * Return value: None
898 */
899static TW_VOID
900twa_pci_intr(TW_VOID *arg)
901{
902 struct twa_softc *sc = (struct twa_softc *)arg;
903
904 tw_osli_dbg_dprintf(10, sc, "entered");
905 if (tw_cl_interrupt(&(sc->ctlr_handle)))
906 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
907}
908#endif
909
910#ifdef TW_OSLI_DEFERRED_INTR_USED
911
912/*
913 * Function name: twa_deferred_intr
914 * Description: Deferred interrupt handler.
915 *
916 * Input: context -- ptr to OSL internal ctlr context
917 * pending -- not used
918 * Output: None
919 * Return value: None
920 */
921static TW_VOID
922twa_deferred_intr(TW_VOID *context, TW_INT32 pending)
923{
924 struct twa_softc *sc = (struct twa_softc *)context;
925
926 tw_osli_dbg_dprintf(10, sc, "entered");
927
928 tw_cl_deferred_interrupt(&(sc->ctlr_handle));
929}
930
931#endif /* TW_OSLI_DEFERRED_INTR_USED */
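/*
 * The fast-filter / deferred-handler split above follows the common
 * FreeBSD pattern: do only the minimum (claim and acknowledge the
 * interrupt) in filter context, and push the heavy lifting to a
 * taskqueue.  The fragment below is a minimal, hypothetical sketch of
 * that pattern, not twa code; the my_* names and the hardware helpers
 * (my_hw_pending, my_hw_ack, my_do_work) are assumptions made purely
 * for illustration.  It assumes <sys/param.h>, <sys/bus.h> and
 * <sys/taskqueue.h>.
 */
#if 0	/* illustrative sketch only */
struct my_softc {
	struct task	deferred_task;
	/* ... device state ... */
};

/* Filter (interrupt) context: claim the interrupt, defer the real work. */
static int
my_fast_intr(void *arg)
{
	struct my_softc *sc = arg;

	if (!my_hw_pending(sc))		/* hypothetical "is it ours?" test */
		return(FILTER_STRAY);
	my_hw_ack(sc);			/* hypothetical acknowledge */
	taskqueue_enqueue(taskqueue_fast, &sc->deferred_task);
	return(FILTER_HANDLED);
}

/* Taskqueue context: same signature as twa_deferred_intr above. */
static void
my_deferred_intr(void *context, int pending)
{
	struct my_softc *sc = context;

	my_do_work(sc);			/* hypothetical heavy lifting */
}

/* At attach time: TASK_INIT(&sc->deferred_task, 0, my_deferred_intr, sc); */
#endif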
932
933
934
935/*
936 * Function name: tw_osli_fw_passthru
937 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
938 *
939 * Input: sc -- ptr to OSL internal ctlr context
940 * buf -- ptr to ioctl pkt understood by CL
941 * Output: None
942 * Return value: 0 -- success
943 * non-zero-- failure
944 */
945TW_INT32
946tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
947{
948 struct tw_osli_req_context *req;
949 struct tw_osli_ioctl_no_data_buf *user_buf =
950 (struct tw_osli_ioctl_no_data_buf *)buf;
951 TW_TIME end_time;
952 TW_UINT32 timeout = 60;
953 TW_UINT32 data_buf_size_adjusted;
954 struct tw_cl_req_packet *req_pkt;
955 struct tw_cl_passthru_req_packet *pt_req;
956 TW_INT32 error;
957
958 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
959
960 if ((req = tw_osli_get_request(sc)) == NULL)
961 return(EBUSY);
962
963 req->req_handle.osl_req_ctxt = req;
964 req->orig_req = buf;
965 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;
966
967 req_pkt = &(req->req_pkt);
968 req_pkt->status = 0;
969 req_pkt->tw_osl_callback = tw_osl_complete_passthru;
970 /* Let the Common Layer retry the request on cmd queue full. */
971 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;
972
973 pt_req = &(req_pkt->gen_req_pkt.pt_req);
974 /*
975 * Make sure that the data buffer sent to firmware is a
976 * 512 byte multiple in size.
977 */
978 data_buf_size_adjusted =
979 (user_buf->driver_pkt.buffer_length +
980 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
981 if ((req->length = data_buf_size_adjusted)) {
982 if ((req->data = malloc(data_buf_size_adjusted,
983 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
984 error = ENOMEM;
985 tw_osli_printf(sc, "error = %d",
986 TW_CL_SEVERITY_ERROR_STRING,
987 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
988 0x2016,
989 "Could not alloc mem for "
990 "fw_passthru data_buf",
991 error);
992 goto fw_passthru_err;
993 }
994 /* Copy the payload. */
995 if ((error = copyin((TW_VOID *)(user_buf->pdata),
996 req->data,
997 user_buf->driver_pkt.buffer_length)) != 0) {
998 tw_osli_printf(sc, "error = %d",
999 TW_CL_SEVERITY_ERROR_STRING,
1000 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1001 0x2017,
1002 "Could not copyin fw_passthru data_buf",
1003 error);
1004 goto fw_passthru_err;
1005 }
1006 pt_req->sgl_entries = 1; /* will be updated during mapping */
1007 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
1008 TW_OSLI_REQ_FLAGS_DATA_OUT);
1009 } else
1010 pt_req->sgl_entries = 0; /* no payload */
1011
1012 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
1013 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);
1014
1015 if ((error = tw_osli_map_request(req)))
1016 goto fw_passthru_err;
1017
1018 end_time = tw_osl_get_local_time() + timeout;
1019 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
1020 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;
1021
1022 error = tsleep(req, PRIBIO, "twa_passthru", timeout * hz);
1023
1024 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
1025 error = 0;
1026 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1027
1028 if (! error) {
1029 if (((error = req->error_code)) ||
1030 ((error = (req->state !=
1031 TW_OSLI_REQ_STATE_COMPLETE))) ||
1032 ((error = req_pkt->status)))
1033 goto fw_passthru_err;
1034 break;
1035 }
1036
1037 if (req_pkt->status) {
1038 error = req_pkt->status;
1039 goto fw_passthru_err;
1040 }
1041
1042 if (error == EWOULDBLOCK) {
1043 /* Time out! */
1044 tw_osli_printf(sc, "request = %p",
1045 TW_CL_SEVERITY_ERROR_STRING,
1046 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1047 0x2018,
1048 "Passthru request timed out!",
1049 req);
1050 /*
1051 * Should I check here if the timeout happened
1052 * because of yet another reset, and not do a
1053 * second reset?
1054 */
1055 tw_cl_reset_ctlr(&sc->ctlr_handle);
1056 /*
1057 * Don't touch req after a reset. It (and any
1058 * associated data) will already have been
1054			 * freed by the callback. Just return.
1055			 */
1056			user_buf->driver_pkt.os_status = error;
1057			return(ETIMEDOUT);
1058		}
1059			 * unmapped by the callback.
1060			 */
1061			user_buf->driver_pkt.os_status = error;
1062			error = ETIMEDOUT;
1063			goto fw_passthru_err;
1064 }
1065 /*
1066 * Either the request got completed, or we were woken up by a
1067 * signal. Calculate the new timeout, in case it was the latter.
1068 */
1069 timeout = (end_time - tw_osl_get_local_time());
1070 }
1071
1072 /* If there was a payload, copy it back. */
1073 if ((!error) && (req->length))
1074 if ((error = copyout(req->data, user_buf->pdata,
1075 user_buf->driver_pkt.buffer_length)))
1076 tw_osli_printf(sc, "error = %d",
1077 TW_CL_SEVERITY_ERROR_STRING,
1078 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1079 0x2019,
1080 "Could not copyout fw_passthru data_buf",
1081 error);
1082
1083fw_passthru_err:
1084 /*
1085 * Print the failure message. For some reason, on certain OS versions,
1086 * printing this error message during reset hangs the display (although
1087	 * the rest of the system is running fine). So, don't print it if the
1088 * failure was due to a reset.
1089 */
1090 if ((error) && (error != TW_CL_ERR_REQ_BUS_RESET))
1091 tw_osli_printf(sc, "error = %d",
1092 TW_CL_SEVERITY_ERROR_STRING,
1093 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1094 0x201A,
1095 "Firmware passthru failed!",
1096 error);
1097
1098 user_buf->driver_pkt.os_status = error;
1099 /* Free resources. */
1100 if (req->data)
1101 free(req->data, TW_OSLI_MALLOC_CLASS);
1102 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1103 return(error);
1104}
1105
1106
1107
1108/*
1109 * Function name: tw_osl_complete_passthru
1110 * Description: Called to complete passthru requests.
1111 *
1112 * Input: req_handle -- ptr to request handle
1113 * Output: None
1114 * Return value: None
1115 */
1116TW_VOID
1117tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
1118{
1119 struct tw_osli_req_context *req = req_handle->osl_req_ctxt;
1120 struct twa_softc *sc = req->ctlr;
1121
1122 tw_osli_dbg_dprintf(5, sc, "entered");
1123
1124 if (req->state != TW_OSLI_REQ_STATE_BUSY) {
1125 tw_osli_printf(sc, "request = %p, status = %d",
1126 TW_CL_SEVERITY_ERROR_STRING,
1127 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1128 0x201B,
1129 "Unposted command completed!!",
1130 req, req->state);
1131 }
1132
1133 /*
1134 * Remove request from the busy queue. Just mark it complete.
1135 * There's no need to move it into the complete queue as we are
1136 * going to be done with it right now.
1137 */
1138 req->state = TW_OSLI_REQ_STATE_COMPLETE;
1139 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
1140
1141 tw_osli_unmap_request(req);
1142
1143 /*
1144 * Don't do a wake up if there was an error even before the request
1145 * was sent down to the Common Layer, and we hadn't gotten an
1146 * EINPROGRESS. The request originator will then be returned an
1147	 * error, and can do the clean-up itself.
1148 */
1149 if ((req->error_code) &&
1150 (!(req->state & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
1151 return;
1152
1153 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1154 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
1155 /* Wake up the sleeping command originator. */
1156 tw_osli_dbg_dprintf(5, sc,
1157 "Waking up originator of request %p", req);
1158 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
1159 wakeup_one(req);
1160 } else {
1161 /*
1162 * If the request completed even before tsleep
1163 * was called, simply return.
1164 */
1165 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
1166 return;
1167
1168 tw_osli_printf(sc, "request = %p",
1169 TW_CL_SEVERITY_ERROR_STRING,
1170 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1171 0x201C,
1172 "Passthru callback called, "
1173 "and caller not sleeping",
1174 req);
1175 }
1176 } else {
1177 tw_osli_printf(sc, "request = %p",
1178 TW_CL_SEVERITY_ERROR_STRING,
1179 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1180 0x201D,
1181 "Passthru callback called for non-passthru request",
1182 req);
1183 }
1184}
1185
1186
1187
1188/*
1189 * Function name: tw_osli_get_request
1190 * Description: Gets a request pkt from the free queue.
1191 *
1192 * Input: sc -- ptr to OSL internal ctlr context
1193 * Output: None
1194 * Return value: ptr to request pkt -- success
1195 * NULL -- failure
1196 */
1197struct tw_osli_req_context *
1198tw_osli_get_request(struct twa_softc *sc)
1199{
1200 struct tw_osli_req_context *req;
1201
1202 tw_osli_dbg_dprintf(4, sc, "entered");
1203
1204 /* Get a free request packet. */
1205 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1206
1207 /* Initialize some fields to their defaults. */
1208 if (req) {
1209 req->req_handle.osl_req_ctxt = NULL;
1210 req->req_handle.cl_req_ctxt = NULL;
1211 req->data = NULL;
1212 req->length = 0;
1213 req->real_data = NULL;
1214 req->real_length = 0;
1215 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1216 req->flags = 0;
1217 req->error_code = 0;
1218 req->orig_req = NULL;
1219
1220 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1221
1222 }
1223 return(req);
1224}
1225
1226
1227
1228/*
1229 * Function name: twa_map_load_data_callback
1230 * Description: Callback of bus_dmamap_load for the buffer associated
1231 * with data. Updates the cmd pkt (size/sgl_entries
1232 * fields, as applicable) to reflect the number of sg
1233 * elements.
1234 *
1235 * Input: arg -- ptr to OSL internal request context
1236 * segs -- ptr to a list of segment descriptors
1237 * nsegments--# of segments
1238 * error -- 0 if no errors encountered before callback,
1239 * non-zero if errors were encountered
1240 * Output: None
1241 * Return value: None
1242 */
1243static TW_VOID
1244twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1245 TW_INT32 nsegments, TW_INT32 error)
1246{
1247 struct tw_osli_req_context *req =
1248 (struct tw_osli_req_context *)arg;
1249 struct twa_softc *sc = req->ctlr;
1250 struct tw_cl_req_packet *req_pkt = &(req->req_pkt);
1251
1252 tw_osli_dbg_dprintf(10, sc, "entered");
1253
1254 /* Mark the request as currently being processed. */
1255 req->state = TW_OSLI_REQ_STATE_BUSY;
1256 /* Move the request into the busy queue. */
1257 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1258
1259 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;
1260 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1261 tw_osli_allow_new_requests(sc, (TW_VOID *)(req->orig_req));
1262
1263 if (error == EFBIG) {
1264 req->error_code = error;
1265 goto out;
1266 }
1267
1268 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1269 struct tw_cl_passthru_req_packet *pt_req;
1270
1271 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1272 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1273 BUS_DMASYNC_PREREAD);
1274
1275 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1276 /*
1277 * If we're using an alignment buffer, and we're
1278 * writing data, copy the real data out.
1279 */
1280 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1281 bcopy(req->real_data, req->data, req->real_length);
1282 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1283 BUS_DMASYNC_PREWRITE);
1284 }
1285
1286 pt_req = &(req_pkt->gen_req_pkt.pt_req);
1287 pt_req->sg_list = (TW_UINT8 *)segs;
1288 pt_req->sgl_entries += (nsegments - 1);
1289 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
1290 &(req->req_handle));
1291 } else {
1292 struct tw_cl_scsi_req_packet *scsi_req;
1293
1294 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
1295 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1296 BUS_DMASYNC_PREREAD);
1297
1298 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
1299 /*
1300 * If we're using an alignment buffer, and we're
1301 * writing data, copy the real data out.
1302 */
1303 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1304 bcopy(req->real_data, req->data, req->real_length);
1305 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1306 BUS_DMASYNC_PREWRITE);
1307 }
1308
1309 scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
1310 scsi_req->sg_list = (TW_UINT8 *)segs;
1311 scsi_req->sgl_entries += (nsegments - 1);
1312 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
1313 &(req->req_handle));
1314 }
1315
1316out:
1317 if (error) {
1318 req->error_code = error;
1319 req_pkt->tw_osl_callback(&(req->req_handle));
1320 /*
1321 * If the caller had been returned EINPROGRESS, and he has
1322 * registered a callback for handling completion, the callback
1323 * will never get called because we were unable to submit the
1324 * request. So, free up the request right here.
1325 */
1326 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
1327 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
1328 }
1329}
1330
1331
1332
1333/*
1334 * Function name: twa_map_load_callback
1335 * Description: Callback of bus_dmamap_load for the buffer associated
1336 * with a cmd pkt.
1337 *
1338 * Input: arg -- ptr to variable to hold phys addr
1339 * segs -- ptr to a list of segment descriptors
1340 * nsegments--# of segments
1341 * error -- 0 if no errors encountered before callback,
1342 * non-zero if errors were encountered
1343 * Output: None
1344 * Return value: None
1345 */
1346static TW_VOID
1347twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1348 TW_INT32 nsegments, TW_INT32 error)
1349{
1350 *((bus_addr_t *)arg) = segs[0].ds_addr;
1351}
1352
1353
1354
1355/*
1356 * Function name: tw_osli_map_request
1357 * Description: Maps a cmd pkt and data associated with it, into
1358 * DMA'able memory.
1359 *
1360 * Input: req -- ptr to request pkt
1361 * Output: None
1362 * Return value: 0 -- success
1363 * non-zero-- failure
1364 */
1365TW_INT32
1366tw_osli_map_request(struct tw_osli_req_context *req)
1367{
1368 struct twa_softc *sc = req->ctlr;
1369 TW_INT32 error = 0;
1370
1371 tw_osli_dbg_dprintf(10, sc, "entered");
1372
1373 /* If the command involves data, map that too. */
1374 if (req->data != NULL) {
1375 /*
1376 * It's sufficient for the data pointer to be 4-byte aligned
1377 * to work with 9000. However, if 4-byte aligned addresses
1378 * are passed to bus_dmamap_load, we can get back sg elements
1379 * that are not 512-byte multiples in size. So, we will let
1380		 * only those buffers that are 512-byte aligned pass
1381 * through, and bounce the rest, so as to make sure that we
1382 * always get back sg elements that are 512-byte multiples
1383 * in size.
1384 */
1385 if (((vm_offset_t)req->data % sc->sg_size_factor) ||
1386 (req->length % sc->sg_size_factor)) {
1387 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
1388 /* Save original data pointer and length. */
1389 req->real_data = req->data;
1390 req->real_length = req->length;
1391 req->length = (req->length +
1392 (sc->sg_size_factor - 1)) &
1393 ~(sc->sg_size_factor - 1);
1394 req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
1395 M_NOWAIT);
1396 if (req->data == NULL) {
1397 tw_osli_printf(sc, "error = %d",
1398 TW_CL_SEVERITY_ERROR_STRING,
1399 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
1400 0x201E,
1401 "Failed to allocate memory "
1402 "for bounce buffer",
1403 ENOMEM);
1404 /* Restore original data pointer and length. */
1405 req->data = req->real_data;
1406 req->length = req->real_length;
1407 return(ENOMEM);
1408 }
1409 }
1410
1411 /*
1412 * Map the data buffer into bus space and build the SG list.
1413 */
1414 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1415 /* Lock against multiple simultaneous ioctl calls. */
1416 mtx_lock_spin(sc->io_lock);
1417 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
1418 req->data, req->length,
1419 twa_map_load_data_callback, req,
1420 BUS_DMA_WAITOK);
1421 mtx_unlock_spin(sc->io_lock);
1422 } else {
1423 /*
1424 * There's only one CAM I/O thread running at a time.
1425 * So, there's no need to hold the io_lock.
1426 */
1427 error = bus_dmamap_load(sc->dma_tag, req->dma_map,
1428 req->data, req->length,
1429 twa_map_load_data_callback, req,
1430 BUS_DMA_WAITOK);
1431 }
1432
1433 if (!error)
1434 error = req->error_code;
1435 else {
1436 if (error == EINPROGRESS) {
1437 /*
1438 * Specifying sc->io_lock as the lockfuncarg
1439 * in ...tag_create should protect the access
1440 * of ...FLAGS_MAPPED from the callback.
1441 */
1442 mtx_lock_spin(sc->io_lock);
1443 if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) {
1444 req->flags |=
1445 TW_OSLI_REQ_FLAGS_IN_PROGRESS;
1446 tw_osli_disallow_new_requests(sc);
1447 }
1448 mtx_unlock_spin(sc->io_lock);
1449 error = 0;
1450 } else {
1451 /* Free alignment buffer if it was used. */
1452 if (req->flags &
1453 TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1454 free(req->data, TW_OSLI_MALLOC_CLASS);
1455 /*
1456 * Restore original data pointer
1457 * and length.
1458 */
1459 req->data = req->real_data;
1460 req->length = req->real_length;
1461 }
1462 }
1463 }
1464
1465 } else {
1466 /* Mark the request as currently being processed. */
1467 req->state = TW_OSLI_REQ_STATE_BUSY;
1468 /* Move the request into the busy queue. */
1469 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
1470 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
1471 error = tw_cl_fw_passthru(&sc->ctlr_handle,
1472 &(req->req_pkt), &(req->req_handle));
1473 else
1474 error = tw_cl_start_io(&sc->ctlr_handle,
1475 &(req->req_pkt), &(req->req_handle));
1476 if (error) {
1477 req->error_code = error;
1478 req->req_pkt.tw_osl_callback(&(req->req_handle));
1479 }
1480 }
1481 return(error);
1482}
1483
1484
1485
1486/*
1487 * Function name: tw_osli_unmap_request
1488 * Description: Undoes the mapping done by tw_osli_map_request.
1489 *
1490 * Input: req -- ptr to request pkt
1491 * Output: None
1492 * Return value: None
1493 */
1494TW_VOID
1495tw_osli_unmap_request(struct tw_osli_req_context *req)
1496{
1497 struct twa_softc *sc = req->ctlr;
1498
1499 tw_osli_dbg_dprintf(10, sc, "entered");
1500
1501 /* If the command involved data, unmap that too. */
1502 if (req->data != NULL) {
1503 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1504 /* Lock against multiple simultaneous ioctl calls. */
1505 mtx_lock_spin(sc->io_lock);
1506
1507 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1508 bus_dmamap_sync(sc->ioctl_tag,
1509 sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1510
1511 /*
1512 * If we are using a bounce buffer, and we are
1513 * reading data, copy the real data in.
1514 */
1515 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1516 bcopy(req->data, req->real_data,
1517 req->real_length);
1518 }
1519
1520 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1521 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1522 BUS_DMASYNC_POSTWRITE);
1523
1524 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1525
1526 mtx_unlock_spin(sc->io_lock);
1527 } else {
1528 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1529 bus_dmamap_sync(sc->dma_tag,
1530 req->dma_map, BUS_DMASYNC_POSTREAD);
1531
1532 /*
1533 * If we are using a bounce buffer, and we are
1534 * reading data, copy the real data in.
1535 */
1536 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1537 bcopy(req->data, req->real_data,
1538 req->real_length);
1539 }
1540 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1541 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1542 BUS_DMASYNC_POSTWRITE);
1543
1544 bus_dmamap_unload(sc->dma_tag, req->dma_map);
1545 }
1546 }
1547
1548 /* Free alignment buffer if it was used. */
1549 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1550 free(req->data, TW_OSLI_MALLOC_CLASS);
1551 /* Restore original data pointer and length. */
1552 req->data = req->real_data;
1553 req->length = req->real_length;
1554 }
1555}
1556
1557
1558
1559#ifdef TW_OSL_DEBUG
1560
1561TW_VOID twa_report_stats(TW_VOID);
1562TW_VOID twa_reset_stats(TW_VOID);
1563TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1564TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1565
1566
1567/*
1568 * Function name: twa_report_stats
1569 * Description: For being called from ddb. Calls functions that print
1570 * OSL and CL internal stats for the controller.
1571 *
1572 * Input: None
1573 * Output: None
1574 * Return value: None
1575 */
1576TW_VOID
1577twa_report_stats(TW_VOID)
1578{
1579 struct twa_softc *sc;
1580 TW_INT32 i;
1581
1582 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1583 tw_osli_print_ctlr_stats(sc);
1584 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1585 }
1586}
1587
1588
1589
1590/*
1591 * Function name: tw_osli_print_ctlr_stats
1592 * Description: For being called from ddb. Prints OSL controller stats
1593 *
1594 * Input: sc -- ptr to OSL internal controller context
1595 * Output: None
1596 * Return value: None
1597 */
1598TW_VOID
1599tw_osli_print_ctlr_stats(struct twa_softc *sc)
1600{
1601 twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
1602 twa_printf(sc, "OSLq type current max\n");
1603 twa_printf(sc, "free %04d %04d\n",
1604 sc->q_stats[TW_OSLI_FREE_Q].cur_len,
1605 sc->q_stats[TW_OSLI_FREE_Q].max_len);
1606 twa_printf(sc, "busy %04d %04d\n",
1607 sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
1608 sc->q_stats[TW_OSLI_BUSY_Q].max_len);
1609}
1610
1611
1612
1613/*
1614 * Function name: twa_print_req_info
1615 * Description: For being called from ddb. Calls functions that print
1616 * OSL and CL internal details for the request.
1617 *
1618 * Input: req -- ptr to OSL internal request context
1619 * Output: None
1620 * Return value: None
1621 */
1622TW_VOID
1623twa_print_req_info(struct tw_osli_req_context *req)
1624{
1625 struct twa_softc *sc = req->ctlr;
1626
1627 twa_printf(sc, "OSL details for request:\n");
1628 twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
1629 "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
1630 "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
1631 "next_req = %p, prev_req = %p, dma_map = %p\n",
1632 req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
1633 req->data, req->length, req->real_data, req->real_length,
1634 req->state, req->flags, req->error_code, req->orig_req,
1635 req->link.next, req->link.prev, req->dma_map);
1636 tw_cl_print_req_info(&(req->req_handle));
1637}
1638
1639
1640
1641/*
1642 * Function name: twa_reset_stats
1643 * Description: For being called from ddb.
1644 * Resets some OSL controller stats.
1645 *
1646 * Input: None
1647 * Output: None
1648 * Return value: None
1649 */
1650TW_VOID
1651twa_reset_stats(TW_VOID)
1652{
1653 struct twa_softc *sc;
1654 TW_INT32 i;
1655
1656 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1657 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1658 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1659 tw_cl_reset_stats(&sc->ctlr_handle);
1660 }
1661}
1662
1663#endif /* TW_OSL_DEBUG */