Deleted Added
full compact
tw_osl_freebsd.c (248583) tw_osl_freebsd.c (254263)
1/*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
1/*
2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
3 * Copyright (c) 2004-05 Vinod Kashyap.
4 * Copyright (c) 2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 248583 2013-03-21 13:06:28Z kib $");
31__FBSDID("$FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $");
32
33/*
34 * AMCC'S 3ware driver for 9000 series storage controllers.
35 *
36 * Author: Vinod Kashyap
37 * Modifications by: Adam Radford
38 * Modifications by: Manjunath Ranganathaiah
39 */
40
41
42/*
43 * FreeBSD specific functions not related to CAM, and other
44 * miscellaneous functions.
45 */
46
47
48#include <dev/twa/tw_osl_includes.h>
49#include <dev/twa/tw_cl_fwif.h>
50#include <dev/twa/tw_cl_ioctl.h>
51#include <dev/twa/tw_osl_ioctl.h>
52
53#ifdef TW_OSL_DEBUG
54TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
55TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
56#endif /* TW_OSL_DEBUG */
57
58static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
59
60
61static d_open_t twa_open;
62static d_close_t twa_close;
63static d_ioctl_t twa_ioctl;
64
65static struct cdevsw twa_cdevsw = {
66 .d_version = D_VERSION,
67 .d_open = twa_open,
68 .d_close = twa_close,
69 .d_ioctl = twa_ioctl,
70 .d_name = "twa",
71};
72
73static devclass_t twa_devclass;
74
75
76/*
77 * Function name: twa_open
78 * Description: Called when the controller is opened.
79 * Simply marks the controller as open.
80 *
81 * Input: dev -- control device corresponding to the ctlr
82 * flags -- mode of open
83 * fmt -- device type (character/block etc.)
84 * proc -- current process
85 * Output: None
86 * Return value: 0 -- success
87 * non-zero-- failure
88 */
89static TW_INT32
90twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
91{
92 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
93
94 tw_osli_dbg_dprintf(5, sc, "entered");
95 sc->open = TW_CL_TRUE;
96 return(0);
97}
98
99
100
101/*
102 * Function name: twa_close
103 * Description: Called when the controller is closed.
104 * Simply marks the controller as not open.
105 *
106 * Input: dev -- control device corresponding to the ctlr
107 * flags -- mode of corresponding open
108 * fmt -- device type (character/block etc.)
109 * proc -- current process
110 * Output: None
111 * Return value: 0 -- success
112 * non-zero-- failure
113 */
114static TW_INT32
115twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
116{
117 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
118
119 tw_osli_dbg_dprintf(5, sc, "entered");
120 sc->open = TW_CL_FALSE;
121 return(0);
122}
123
124
125
126/*
127 * Function name: twa_ioctl
128 * Description: Called when an ioctl is posted to the controller.
129 * Handles any OS Layer specific cmds, passes the rest
130 * on to the Common Layer.
131 *
132 * Input: dev -- control device corresponding to the ctlr
133 * cmd -- ioctl cmd
134 * buf -- ptr to buffer in kernel memory, which is
135 * a copy of the input buffer in user-space
136 * flags -- mode of corresponding open
137 * proc -- current process
138 * Output: buf -- ptr to buffer in kernel memory, which will
139 * be copied to the output buffer in user-space
140 * Return value: 0 -- success
141 * non-zero-- failure
142 */
143static TW_INT32
144twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
145{
146 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
147 TW_INT32 error;
148
149 tw_osli_dbg_dprintf(5, sc, "entered");
150
151 switch (cmd) {
152 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
153 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
154 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
155 break;
156
157 case TW_OSL_IOCTL_SCAN_BUS:
158 /* Request CAM for a bus scan. */
159 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
160 error = tw_osli_request_bus_scan(sc);
161 break;
162
163 default:
164 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
165 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
166 break;
167 }
168 return(error);
169}
170
171
172
173static TW_INT32 twa_probe(device_t dev);
174static TW_INT32 twa_attach(device_t dev);
175static TW_INT32 twa_detach(device_t dev);
176static TW_INT32 twa_shutdown(device_t dev);
177static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
178static TW_VOID twa_pci_intr(TW_VOID *arg);
179static TW_VOID twa_watchdog(TW_VOID *arg);
180int twa_setup_intr(struct twa_softc *sc);
181int twa_teardown_intr(struct twa_softc *sc);
182
183static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
184static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
185
186static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
187 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
188static TW_VOID twa_map_load_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190
191
192static device_method_t twa_methods[] = {
193 /* Device interface */
194 DEVMETHOD(device_probe, twa_probe),
195 DEVMETHOD(device_attach, twa_attach),
196 DEVMETHOD(device_detach, twa_detach),
197 DEVMETHOD(device_shutdown, twa_shutdown),
198
199 DEVMETHOD_END
200};
201
202static driver_t twa_pci_driver = {
203 "twa",
204 twa_methods,
205 sizeof(struct twa_softc)
206};
207
208DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
209MODULE_DEPEND(twa, cam, 1, 1, 1);
210MODULE_DEPEND(twa, pci, 1, 1, 1);
211
212
213/*
214 * Function name: twa_probe
215 * Description: Called at driver load time. Claims 9000 ctlrs.
216 *
217 * Input: dev -- bus device corresponding to the ctlr
218 * Output: None
219 * Return value: <= 0 -- success
220 * > 0 -- failure
221 */
222static TW_INT32
223twa_probe(device_t dev)
224{
225 static TW_UINT8 first_ctlr = 1;
226
227 tw_osli_dbg_printf(3, "entered");
228
229 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
230 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
231 /* Print the driver version only once. */
232 if (first_ctlr) {
233 printf("3ware device driver for 9000 series storage "
234 "controllers, version: %s\n",
235 TW_OSL_DRIVER_VERSION_STRING);
236 first_ctlr = 0;
237 }
238 return(0);
239 }
240 return(ENXIO);
241}
242
243int twa_setup_intr(struct twa_softc *sc)
244{
245 int error = 0;
246
247 if (!(sc->intr_handle) && (sc->irq_res)) {
248 error = bus_setup_intr(sc->bus_dev, sc->irq_res,
249 INTR_TYPE_CAM | INTR_MPSAFE,
250 NULL, twa_pci_intr,
251 sc, &sc->intr_handle);
252 }
253 return( error );
254}
255
256
257int twa_teardown_intr(struct twa_softc *sc)
258{
259 int error = 0;
260
261 if ((sc->intr_handle) && (sc->irq_res)) {
262 error = bus_teardown_intr(sc->bus_dev,
263 sc->irq_res, sc->intr_handle);
264 sc->intr_handle = NULL;
265 }
266 return( error );
267}
268
269
270
271/*
272 * Function name: twa_attach
273 * Description: Allocates pci resources; updates sc; adds a node to the
274 * sysctl tree to expose the driver version; makes calls
275 * (to the Common Layer) to initialize ctlr, and to
276 * attach to CAM.
277 *
278 * Input: dev -- bus device corresponding to the ctlr
279 * Output: None
280 * Return value: 0 -- success
281 * non-zero-- failure
282 */
283static TW_INT32
284twa_attach(device_t dev)
285{
286 struct twa_softc *sc = device_get_softc(dev);
32
33/*
34 * AMCC'S 3ware driver for 9000 series storage controllers.
35 *
36 * Author: Vinod Kashyap
37 * Modifications by: Adam Radford
38 * Modifications by: Manjunath Ranganathaiah
39 */
40
41
42/*
43 * FreeBSD specific functions not related to CAM, and other
44 * miscellaneous functions.
45 */
46
47
48#include <dev/twa/tw_osl_includes.h>
49#include <dev/twa/tw_cl_fwif.h>
50#include <dev/twa/tw_cl_ioctl.h>
51#include <dev/twa/tw_osl_ioctl.h>
52
53#ifdef TW_OSL_DEBUG
54TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
55TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
56#endif /* TW_OSL_DEBUG */
57
58static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");
59
60
61static d_open_t twa_open;
62static d_close_t twa_close;
63static d_ioctl_t twa_ioctl;
64
65static struct cdevsw twa_cdevsw = {
66 .d_version = D_VERSION,
67 .d_open = twa_open,
68 .d_close = twa_close,
69 .d_ioctl = twa_ioctl,
70 .d_name = "twa",
71};
72
73static devclass_t twa_devclass;
74
75
76/*
77 * Function name: twa_open
78 * Description: Called when the controller is opened.
79 * Simply marks the controller as open.
80 *
81 * Input: dev -- control device corresponding to the ctlr
82 * flags -- mode of open
83 * fmt -- device type (character/block etc.)
84 * proc -- current process
85 * Output: None
86 * Return value: 0 -- success
87 * non-zero-- failure
88 */
89static TW_INT32
90twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
91{
92 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
93
94 tw_osli_dbg_dprintf(5, sc, "entered");
95 sc->open = TW_CL_TRUE;
96 return(0);
97}
98
99
100
101/*
102 * Function name: twa_close
103 * Description: Called when the controller is closed.
104 * Simply marks the controller as not open.
105 *
106 * Input: dev -- control device corresponding to the ctlr
107 * flags -- mode of corresponding open
108 * fmt -- device type (character/block etc.)
109 * proc -- current process
110 * Output: None
111 * Return value: 0 -- success
112 * non-zero-- failure
113 */
114static TW_INT32
115twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc)
116{
117 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
118
119 tw_osli_dbg_dprintf(5, sc, "entered");
120 sc->open = TW_CL_FALSE;
121 return(0);
122}
123
124
125
126/*
127 * Function name: twa_ioctl
128 * Description: Called when an ioctl is posted to the controller.
129 * Handles any OS Layer specific cmds, passes the rest
130 * on to the Common Layer.
131 *
132 * Input: dev -- control device corresponding to the ctlr
133 * cmd -- ioctl cmd
134 * buf -- ptr to buffer in kernel memory, which is
135 * a copy of the input buffer in user-space
136 * flags -- mode of corresponding open
137 * proc -- current process
138 * Output: buf -- ptr to buffer in kernel memory, which will
139 * be copied to the output buffer in user-space
140 * Return value: 0 -- success
141 * non-zero-- failure
142 */
143static TW_INT32
144twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc)
145{
146 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1);
147 TW_INT32 error;
148
149 tw_osli_dbg_dprintf(5, sc, "entered");
150
151 switch (cmd) {
152 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
153 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
154 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
155 break;
156
157 case TW_OSL_IOCTL_SCAN_BUS:
158 /* Request CAM for a bus scan. */
159 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
160 error = tw_osli_request_bus_scan(sc);
161 break;
162
163 default:
164 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
165 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
166 break;
167 }
168 return(error);
169}
170
171
172
173static TW_INT32 twa_probe(device_t dev);
174static TW_INT32 twa_attach(device_t dev);
175static TW_INT32 twa_detach(device_t dev);
176static TW_INT32 twa_shutdown(device_t dev);
177static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op);
178static TW_VOID twa_pci_intr(TW_VOID *arg);
179static TW_VOID twa_watchdog(TW_VOID *arg);
180int twa_setup_intr(struct twa_softc *sc);
181int twa_teardown_intr(struct twa_softc *sc);
182
183static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc);
184static TW_VOID tw_osli_free_resources(struct twa_softc *sc);
185
186static TW_VOID twa_map_load_data_callback(TW_VOID *arg,
187 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
188static TW_VOID twa_map_load_callback(TW_VOID *arg,
189 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
190
191
192static device_method_t twa_methods[] = {
193 /* Device interface */
194 DEVMETHOD(device_probe, twa_probe),
195 DEVMETHOD(device_attach, twa_attach),
196 DEVMETHOD(device_detach, twa_detach),
197 DEVMETHOD(device_shutdown, twa_shutdown),
198
199 DEVMETHOD_END
200};
201
202static driver_t twa_pci_driver = {
203 "twa",
204 twa_methods,
205 sizeof(struct twa_softc)
206};
207
208DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0);
209MODULE_DEPEND(twa, cam, 1, 1, 1);
210MODULE_DEPEND(twa, pci, 1, 1, 1);
211
212
213/*
214 * Function name: twa_probe
215 * Description: Called at driver load time. Claims 9000 ctlrs.
216 *
217 * Input: dev -- bus device corresponding to the ctlr
218 * Output: None
219 * Return value: <= 0 -- success
220 * > 0 -- failure
221 */
222static TW_INT32
223twa_probe(device_t dev)
224{
225 static TW_UINT8 first_ctlr = 1;
226
227 tw_osli_dbg_printf(3, "entered");
228
229 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
230 device_set_desc(dev, TW_OSLI_DEVICE_NAME);
231 /* Print the driver version only once. */
232 if (first_ctlr) {
233 printf("3ware device driver for 9000 series storage "
234 "controllers, version: %s\n",
235 TW_OSL_DRIVER_VERSION_STRING);
236 first_ctlr = 0;
237 }
238 return(0);
239 }
240 return(ENXIO);
241}
242
243int twa_setup_intr(struct twa_softc *sc)
244{
245 int error = 0;
246
247 if (!(sc->intr_handle) && (sc->irq_res)) {
248 error = bus_setup_intr(sc->bus_dev, sc->irq_res,
249 INTR_TYPE_CAM | INTR_MPSAFE,
250 NULL, twa_pci_intr,
251 sc, &sc->intr_handle);
252 }
253 return( error );
254}
255
256
257int twa_teardown_intr(struct twa_softc *sc)
258{
259 int error = 0;
260
261 if ((sc->intr_handle) && (sc->irq_res)) {
262 error = bus_teardown_intr(sc->bus_dev,
263 sc->irq_res, sc->intr_handle);
264 sc->intr_handle = NULL;
265 }
266 return( error );
267}
268
269
270
271/*
272 * Function name: twa_attach
273 * Description: Allocates pci resources; updates sc; adds a node to the
274 * sysctl tree to expose the driver version; makes calls
275 * (to the Common Layer) to initialize ctlr, and to
276 * attach to CAM.
277 *
278 * Input: dev -- bus device corresponding to the ctlr
279 * Output: None
280 * Return value: 0 -- success
281 * non-zero-- failure
282 */
283static TW_INT32
284twa_attach(device_t dev)
285{
286 struct twa_softc *sc = device_get_softc(dev);
287 TW_UINT32 command;
288 TW_INT32 bar_num;
289 TW_INT32 bar0_offset;
290 TW_INT32 bar_size;
291 TW_INT32 error;
292
293 tw_osli_dbg_dprintf(3, sc, "entered");
294
295 sc->ctlr_handle.osl_ctlr_ctxt = sc;
296
297 /* Initialize the softc structure. */
298 sc->bus_dev = dev;
299 sc->device_id = pci_get_device(dev);
300
301 /* Initialize the mutexes right here. */
302 sc->io_lock = &(sc->io_lock_handle);
303 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
304 sc->q_lock = &(sc->q_lock_handle);
305 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
306 sc->sim_lock = &(sc->sim_lock_handle);
307 mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
308
309 sysctl_ctx_init(&sc->sysctl_ctxt);
310 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
311 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
312 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
313 if (sc->sysctl_tree == NULL) {
314 tw_osli_printf(sc, "error = %d",
315 TW_CL_SEVERITY_ERROR_STRING,
316 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
317 0x2000,
318 "Cannot add sysctl tree node",
319 ENXIO);
320 return(ENXIO);
321 }
322 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
323 OID_AUTO, "driver_version", CTLFLAG_RD,
324 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
325
287 TW_INT32 bar_num;
288 TW_INT32 bar0_offset;
289 TW_INT32 bar_size;
290 TW_INT32 error;
291
292 tw_osli_dbg_dprintf(3, sc, "entered");
293
294 sc->ctlr_handle.osl_ctlr_ctxt = sc;
295
296 /* Initialize the softc structure. */
297 sc->bus_dev = dev;
298 sc->device_id = pci_get_device(dev);
299
300 /* Initialize the mutexes right here. */
301 sc->io_lock = &(sc->io_lock_handle);
302 mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN);
303 sc->q_lock = &(sc->q_lock_handle);
304 mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN);
305 sc->sim_lock = &(sc->sim_lock_handle);
306 mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE);
307
308 sysctl_ctx_init(&sc->sysctl_ctxt);
309 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt,
310 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
311 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
312 if (sc->sysctl_tree == NULL) {
313 tw_osli_printf(sc, "error = %d",
314 TW_CL_SEVERITY_ERROR_STRING,
315 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
316 0x2000,
317 "Cannot add sysctl tree node",
318 ENXIO);
319 return(ENXIO);
320 }
321 SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree),
322 OID_AUTO, "driver_version", CTLFLAG_RD,
323 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");
324
326 /* Make sure we are going to be able to talk to this board. */
327 command = pci_read_config(dev, PCIR_COMMAND, 2);
328 if ((command & PCIM_CMD_PORTEN) == 0) {
329 tw_osli_printf(sc, "error = %d",
330 TW_CL_SEVERITY_ERROR_STRING,
331 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
332 0x2001,
333 "Register window not available",
334 ENXIO);
335 tw_osli_free_resources(sc);
336 return(ENXIO);
337 }
338
339 /* Force the busmaster enable bit on, in case the BIOS forgot. */
325 /* Force the busmaster enable bit on, in case the BIOS forgot. */
340 command |= PCIM_CMD_BUSMASTEREN;
341 pci_write_config(dev, PCIR_COMMAND, command, 2);
326 pci_enable_busmaster(dev);
342
343 /* Allocate the PCI register window. */
344 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
345 &bar_num, &bar0_offset, &bar_size))) {
346 tw_osli_printf(sc, "error = %d",
347 TW_CL_SEVERITY_ERROR_STRING,
348 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
349 0x201F,
350 "Can't get PCI BAR info",
351 error);
352 tw_osli_free_resources(sc);
353 return(error);
354 }
355 sc->reg_res_id = PCIR_BARS + bar0_offset;
356 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
357 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
358 == NULL) {
359 tw_osli_printf(sc, "error = %d",
360 TW_CL_SEVERITY_ERROR_STRING,
361 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
362 0x2002,
363 "Can't allocate register window",
364 ENXIO);
365 tw_osli_free_resources(sc);
366 return(ENXIO);
367 }
368 sc->bus_tag = rman_get_bustag(sc->reg_res);
369 sc->bus_handle = rman_get_bushandle(sc->reg_res);
370
371 /* Allocate and register our interrupt. */
372 sc->irq_res_id = 0;
373 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
374 &(sc->irq_res_id), 0, ~0, 1,
375 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
376 tw_osli_printf(sc, "error = %d",
377 TW_CL_SEVERITY_ERROR_STRING,
378 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
379 0x2003,
380 "Can't allocate interrupt",
381 ENXIO);
382 tw_osli_free_resources(sc);
383 return(ENXIO);
384 }
385 if ((error = twa_setup_intr(sc))) {
386 tw_osli_printf(sc, "error = %d",
387 TW_CL_SEVERITY_ERROR_STRING,
388 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
389 0x2004,
390 "Can't set up interrupt",
391 error);
392 tw_osli_free_resources(sc);
393 return(error);
394 }
395
396 if ((error = tw_osli_alloc_mem(sc))) {
397 tw_osli_printf(sc, "error = %d",
398 TW_CL_SEVERITY_ERROR_STRING,
399 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
400 0x2005,
401 "Memory allocation failure",
402 error);
403 tw_osli_free_resources(sc);
404 return(error);
405 }
406
407 /* Initialize the Common Layer for this controller. */
408 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
409 TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
410 sc->non_dma_mem, sc->dma_mem,
411 sc->dma_mem_phys
412 ))) {
413 tw_osli_printf(sc, "error = %d",
414 TW_CL_SEVERITY_ERROR_STRING,
415 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
416 0x2006,
417 "Failed to initialize Common Layer/controller",
418 error);
419 tw_osli_free_resources(sc);
420 return(error);
421 }
422
423 /* Create the control device. */
424 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
425 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
426 "twa%d", device_get_unit(sc->bus_dev));
427 sc->ctrl_dev->si_drv1 = sc;
428
429 if ((error = tw_osli_cam_attach(sc))) {
430 tw_osli_free_resources(sc);
431 tw_osli_printf(sc, "error = %d",
432 TW_CL_SEVERITY_ERROR_STRING,
433 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
434 0x2007,
435 "Failed to initialize CAM",
436 error);
437 return(error);
438 }
439
440 sc->watchdog_index = 0;
441 callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
442 callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
443 callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
444
445 return(0);
446}
447
448
449static TW_VOID
450twa_watchdog(TW_VOID *arg)
451{
452 struct tw_cl_ctlr_handle *ctlr_handle =
453 (struct tw_cl_ctlr_handle *)arg;
454 struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
455 int i;
456 int i_need_a_reset = 0;
457 int driver_is_active = 0;
458 int my_watchdog_was_pending = 1234;
459 TW_UINT64 current_time;
460 struct tw_osli_req_context *my_req;
461
462
463//==============================================================================
464 current_time = (TW_UINT64) (tw_osl_get_local_time());
465
466 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
467 my_req = &(sc->req_ctx_buf[i]);
468
469 if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
470 (my_req->deadline) &&
471 (my_req->deadline < current_time)) {
472 tw_cl_set_reset_needed(ctlr_handle);
473#ifdef TW_OSL_DEBUG
474 device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
475#else /* TW_OSL_DEBUG */
476 device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
477#endif /* TW_OSL_DEBUG */
478 break;
479 }
480 }
481//==============================================================================
482
483 i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);
484
485 i = (int) ((sc->watchdog_index++) & 1);
486
487 driver_is_active = tw_cl_is_active(ctlr_handle);
488
489 if (i_need_a_reset) {
490#ifdef TW_OSL_DEBUG
491 device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
492#endif /* TW_OSL_DEBUG */
493 my_watchdog_was_pending =
494 callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
495 tw_cl_reset_ctlr(ctlr_handle);
496#ifdef TW_OSL_DEBUG
497 device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
498#endif /* TW_OSL_DEBUG */
499 } else if (driver_is_active) {
500 my_watchdog_was_pending =
501 callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
502 }
503#ifdef TW_OSL_DEBUG
504 if (i_need_a_reset || my_watchdog_was_pending)
505 device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
506 "driver_is_active = %d, my_watchdog_was_pending = %d\n",
507 i_need_a_reset, driver_is_active, my_watchdog_was_pending);
508#endif /* TW_OSL_DEBUG */
509}
510
511
512/*
513 * Function name: tw_osli_alloc_mem
514 * Description: Allocates memory needed both by CL and OSL.
515 *
516 * Input: sc -- OSL internal controller context
517 * Output: None
518 * Return value: 0 -- success
519 * non-zero-- failure
520 */
521static TW_INT32
522tw_osli_alloc_mem(struct twa_softc *sc)
523{
524 struct tw_osli_req_context *req;
525 TW_UINT32 max_sg_elements;
526 TW_UINT32 non_dma_mem_size;
527 TW_UINT32 dma_mem_size;
528 TW_INT32 error;
529 TW_INT32 i;
530
531 tw_osli_dbg_dprintf(3, sc, "entered");
532
533 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
534 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
535
536 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
537 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
538
539 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
540 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
541 &(sc->alignment), &(sc->sg_size_factor),
542 &non_dma_mem_size, &dma_mem_size
543 ))) {
544 tw_osli_printf(sc, "error = %d",
545 TW_CL_SEVERITY_ERROR_STRING,
546 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
547 0x2008,
548 "Can't get Common Layer's memory requirements",
549 error);
550 return(error);
551 }
552
553 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
554 M_WAITOK)) == NULL) {
555 tw_osli_printf(sc, "error = %d",
556 TW_CL_SEVERITY_ERROR_STRING,
557 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
558 0x2009,
559 "Can't allocate non-dma memory",
560 ENOMEM);
561 return(ENOMEM);
562 }
563
564 /* Create the parent dma tag. */
565 if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
566 sc->alignment, /* alignment */
567 0, /* boundary */
568 BUS_SPACE_MAXADDR, /* lowaddr */
569 BUS_SPACE_MAXADDR, /* highaddr */
570 NULL, NULL, /* filter, filterarg */
571 TW_CL_MAX_IO_SIZE, /* maxsize */
572 max_sg_elements, /* nsegments */
573 TW_CL_MAX_IO_SIZE, /* maxsegsize */
574 0, /* flags */
575 NULL, /* lockfunc */
576 NULL, /* lockfuncarg */
577 &sc->parent_tag /* tag */)) {
578 tw_osli_printf(sc, "error = %d",
579 TW_CL_SEVERITY_ERROR_STRING,
580 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
581 0x200A,
582 "Can't allocate parent DMA tag",
583 ENOMEM);
584 return(ENOMEM);
585 }
586
587 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
588 if (bus_dma_tag_create(sc->parent_tag, /* parent */
589 sc->alignment, /* alignment */
590 0, /* boundary */
591 BUS_SPACE_MAXADDR, /* lowaddr */
592 BUS_SPACE_MAXADDR, /* highaddr */
593 NULL, NULL, /* filter, filterarg */
594 dma_mem_size, /* maxsize */
595 1, /* nsegments */
596 BUS_SPACE_MAXSIZE, /* maxsegsize */
597 0, /* flags */
598 NULL, /* lockfunc */
599 NULL, /* lockfuncarg */
600 &sc->cmd_tag /* tag */)) {
601 tw_osli_printf(sc, "error = %d",
602 TW_CL_SEVERITY_ERROR_STRING,
603 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
604 0x200B,
605 "Can't allocate DMA tag for Common Layer's "
606 "DMA'able memory",
607 ENOMEM);
608 return(ENOMEM);
609 }
610
611 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
612 BUS_DMA_NOWAIT, &sc->cmd_map)) {
613 /* Try a second time. */
614 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
615 BUS_DMA_NOWAIT, &sc->cmd_map)) {
616 tw_osli_printf(sc, "error = %d",
617 TW_CL_SEVERITY_ERROR_STRING,
618 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
619 0x200C,
620 "Can't allocate DMA'able memory for the"
621 "Common Layer",
622 ENOMEM);
623 return(ENOMEM);
624 }
625 }
626
627 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
628 dma_mem_size, twa_map_load_callback,
629 &sc->dma_mem_phys, 0);
630
631 /*
632 * Create a dma tag for data buffers; size will be the maximum
633 * possible I/O size (128kB).
634 */
635 if (bus_dma_tag_create(sc->parent_tag, /* parent */
636 sc->alignment, /* alignment */
637 0, /* boundary */
638 BUS_SPACE_MAXADDR, /* lowaddr */
639 BUS_SPACE_MAXADDR, /* highaddr */
640 NULL, NULL, /* filter, filterarg */
641 TW_CL_MAX_IO_SIZE, /* maxsize */
642 max_sg_elements, /* nsegments */
643 TW_CL_MAX_IO_SIZE, /* maxsegsize */
644 BUS_DMA_ALLOCNOW, /* flags */
645 twa_busdma_lock, /* lockfunc */
646 sc->io_lock, /* lockfuncarg */
647 &sc->dma_tag /* tag */)) {
648 tw_osli_printf(sc, "error = %d",
649 TW_CL_SEVERITY_ERROR_STRING,
650 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
651 0x200F,
652 "Can't allocate DMA tag for data buffers",
653 ENOMEM);
654 return(ENOMEM);
655 }
656
657 /*
658 * Create a dma tag for ioctl data buffers; size will be the maximum
659 * possible I/O size (128kB).
660 */
661 if (bus_dma_tag_create(sc->parent_tag, /* parent */
662 sc->alignment, /* alignment */
663 0, /* boundary */
664 BUS_SPACE_MAXADDR, /* lowaddr */
665 BUS_SPACE_MAXADDR, /* highaddr */
666 NULL, NULL, /* filter, filterarg */
667 TW_CL_MAX_IO_SIZE, /* maxsize */
668 max_sg_elements, /* nsegments */
669 TW_CL_MAX_IO_SIZE, /* maxsegsize */
670 BUS_DMA_ALLOCNOW, /* flags */
671 twa_busdma_lock, /* lockfunc */
672 sc->io_lock, /* lockfuncarg */
673 &sc->ioctl_tag /* tag */)) {
674 tw_osli_printf(sc, "error = %d",
675 TW_CL_SEVERITY_ERROR_STRING,
676 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
677 0x2010,
678 "Can't allocate DMA tag for ioctl data buffers",
679 ENOMEM);
680 return(ENOMEM);
681 }
682
683 /* Create just one map for all ioctl request data buffers. */
684 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
685 tw_osli_printf(sc, "error = %d",
686 TW_CL_SEVERITY_ERROR_STRING,
687 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
688 0x2011,
689 "Can't create ioctl map",
690 ENOMEM);
691 return(ENOMEM);
692 }
693
694
695 /* Initialize request queues. */
696 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
697 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
698
699 if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
700 malloc((sizeof(struct tw_osli_req_context) *
701 TW_OSLI_MAX_NUM_REQUESTS),
702 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
703 tw_osli_printf(sc, "error = %d",
704 TW_CL_SEVERITY_ERROR_STRING,
705 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
706 0x2012,
707 "Failed to allocate request packets",
708 ENOMEM);
709 return(ENOMEM);
710 }
711 bzero(sc->req_ctx_buf,
712 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);
713
714 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
715 req = &(sc->req_ctx_buf[i]);
716 req->ctlr = sc;
717 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
718 tw_osli_printf(sc, "request # = %d, error = %d",
719 TW_CL_SEVERITY_ERROR_STRING,
720 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
721 0x2013,
722 "Can't create dma map",
723 i, ENOMEM);
724 return(ENOMEM);
725 }
726
727 /* Initialize the ioctl wakeup/ timeout mutex */
728 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
729 mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);
730
731 /* Insert request into the free queue. */
732 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
733 }
734
735 return(0);
736}
737
738
739
/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
 *			Releases, roughly in reverse order of allocation:
 *			CAM, per-request mutexes/DMA maps, the ioctl map,
 *			driver memory, DMA tags, the interrupt, the register
 *			window, the control device and the sysctl context.
 *			All failures are only logged (debug printfs); the
 *			teardown continues regardless.
 *
 * Input:		sc -- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/*
	 * Destroy per-request state (ioctl wakeup mutex and data DMA map)
	 * for every request on the free queue.
	 * NOTE(review): requests not on the free queue are not visited;
	 * presumably none are outstanding by this point -- confirm.
	 */
	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	/* Destroy the single map shared by all ioctl data buffers. */
	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload and free the command-packet DMA memory before its tag. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}

	/* Destroy the DMA tags: children first, parent last. */
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
		tw_osli_dbg_dprintf(1, sc,
			"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
			SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
			SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	/* Free the sysctl context; an error here is only logged. */
	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
838
839
840
841/*
842 * Function name: twa_detach
843 * Description: Called when the controller is being detached from
844 * the pci bus.
845 *
846 * Input: dev -- bus device corresponding to the ctlr
847 * Output: None
848 * Return value: 0 -- success
849 * non-zero-- failure
850 */
851static TW_INT32
852twa_detach(device_t dev)
853{
854 struct twa_softc *sc = device_get_softc(dev);
855 TW_INT32 error;
856
857 tw_osli_dbg_dprintf(3, sc, "entered");
858
859 error = EBUSY;
860 if (sc->open) {
861 tw_osli_printf(sc, "error = %d",
862 TW_CL_SEVERITY_ERROR_STRING,
863 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
864 0x2014,
865 "Device open",
866 error);
867 goto out;
868 }
869
870 /* Shut the controller down. */
871 if ((error = twa_shutdown(dev)))
872 goto out;
873
874 /* Free all resources associated with this controller. */
875 tw_osli_free_resources(sc);
876 error = 0;
877
878out:
879 return(error);
880}
881
882
883
884/*
885 * Function name: twa_shutdown
886 * Description: Called at unload/shutdown time. Lets the controller
887 * know that we are going down.
888 *
889 * Input: dev -- bus device corresponding to the ctlr
890 * Output: None
891 * Return value: 0 -- success
892 * non-zero-- failure
893 */
894static TW_INT32
895twa_shutdown(device_t dev)
896{
897 struct twa_softc *sc = device_get_softc(dev);
898 TW_INT32 error = 0;
899
900 tw_osli_dbg_dprintf(3, sc, "entered");
901
902 /* Disconnect interrupts. */
903 error = twa_teardown_intr(sc);
904
905 /* Stop watchdog task. */
906 callout_drain(&(sc->watchdog_callout[0]));
907 callout_drain(&(sc->watchdog_callout[1]));
908
909 /* Disconnect from the controller. */
910 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
911 tw_osli_printf(sc, "error = %d",
912 TW_CL_SEVERITY_ERROR_STRING,
913 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
914 0x2015,
915 "Failed to shutdown Common Layer/controller",
916 error);
917 }
918 return(error);
919}
920
921
922
923/*
924 * Function name: twa_busdma_lock
925 * Description: Function to provide synchronization during busdma_swi.
926 *
927 * Input: lock_arg -- lock mutex sent as argument
928 * op -- operation (lock/unlock) expected of the function
929 * Output: None
930 * Return value: None
931 */
932TW_VOID
933twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
934{
935 struct mtx *lock;
936
937 lock = (struct mtx *)lock_arg;
938 switch (op) {
939 case BUS_DMA_LOCK:
940 mtx_lock_spin(lock);
941 break;
942
943 case BUS_DMA_UNLOCK:
944 mtx_unlock_spin(lock);
945 break;
946
947 default:
948 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
949 }
950}
951
952
953/*
954 * Function name: twa_pci_intr
955 * Description: Interrupt handler. Wrapper for twa_interrupt.
956 *
957 * Input: arg -- ptr to OSL internal ctlr context
958 * Output: None
959 * Return value: None
960 */
961static TW_VOID
962twa_pci_intr(TW_VOID *arg)
963{
964 struct twa_softc *sc = (struct twa_softc *)arg;
965
966 tw_osli_dbg_dprintf(10, sc, "entered");
967 tw_cl_interrupt(&(sc->ctlr_handle));
968}
969
970
/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *			Copies the user payload in (if any), maps it for DMA,
 *			then sleeps until the completion callback
 *			(tw_osl_complete_passthru) wakes us.  On a genuine
 *			timeout the controller is reset and the wait restarts.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60;	/* seconds */
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	/* No free request packets => the controller is busy. */
	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size (round up to sg_size_factor).
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		/* Payload present: allocate a kernel buffer for it. */
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	/* Map the payload for DMA and submit the request to the CL. */
	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	/* Sleep until the request completes, or handle timeouts/signals. */
	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		/*
		 * If the completion callback already cleared SLEEPING,
		 * treat this as a normal wakeup even if mtx_sleep
		 * reported an error (e.g. a timeout).
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			/* Normal wakeup; pick up any error status. */
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code)) &&
				(req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
				(!(req_pkt->status)) ) {
				/*
				 * The request actually completed cleanly;
				 * the wakeup just raced the timeout.
				 */
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			/* Real timeout: reset the controller (once). */
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			/* Restart the wait with a fresh deadline. */
			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	/* A bus reset during the request is reported to the user as EBUSY. */
	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
1150
1151
1152
/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.  Marks the
 *			request complete, unmaps it, and wakes up the
 *			originator sleeping in tw_osli_fw_passthru (if it
 *			is sleeping); otherwise logs the anomaly.
 *
 * Input:		req_handle -- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	/* A request completing here should be in the BUSY state. */
	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			/* A bus-reset completion is also a benign case. */
			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			/* Anything else is unexpected -- log it. */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}
1234
1235
1236
1237/*
1238 * Function name: tw_osli_get_request
1239 * Description: Gets a request pkt from the free queue.
1240 *
1241 * Input: sc -- ptr to OSL internal ctlr context
1242 * Output: None
1243 * Return value: ptr to request pkt -- success
1244 * NULL -- failure
1245 */
1246struct tw_osli_req_context *
1247tw_osli_get_request(struct twa_softc *sc)
1248{
1249 struct tw_osli_req_context *req;
1250
1251 tw_osli_dbg_dprintf(4, sc, "entered");
1252
1253 /* Get a free request packet. */
1254 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1255
1256 /* Initialize some fields to their defaults. */
1257 if (req) {
1258 req->req_handle.osl_req_ctxt = NULL;
1259 req->req_handle.cl_req_ctxt = NULL;
1260 req->req_handle.is_io = 0;
1261 req->data = NULL;
1262 req->length = 0;
1263 req->deadline = 0;
1264 req->real_data = NULL;
1265 req->real_length = 0;
1266 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1267 req->flags = 0;
1268 req->error_code = 0;
1269 req->orig_req = NULL;
1270
1271 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1272
1273 }
1274 return(req);
1275}
1276
1277
1278
/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements, performs the pre-I/O DMA syncs, and submits
 *			the request to the Common Layer.
 *
 * Input:	arg	-- ptr to OSL internal request context
 *		segs	-- ptr to a list of segment descriptors
 *		nsegments--# of segments
 *		error	-- 0 if no errors encountered before callback,
 *				non-zero if errors were encountered
 * Output:	None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* EINVAL: bail out before touching queues or the map. */
	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	/* EFBIG (too many segments): fail via the common error path. */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		/* Ioctl buffers use the shared ioctl tag/map. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		/*
		 * sgl_entries was pre-set to 1 before mapping; adjust it
		 * by the actual segment count and submit to the CL.
		 */
		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		/* Regular I/O uses the per-request dma map. */
		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1384
1385
1386
1387/*
1388 * Function name: twa_map_load_callback
1389 * Description: Callback of bus_dmamap_load for the buffer associated
1390 * with a cmd pkt.
1391 *
1392 * Input: arg -- ptr to variable to hold phys addr
1393 * segs -- ptr to a list of segment descriptors
1394 * nsegments--# of segments
1395 * error -- 0 if no errors encountered before callback,
1396 * non-zero if errors were encountered
1397 * Output: None
1398 * Return value: None
1399 */
1400static TW_VOID
1401twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1402 TW_INT32 nsegments, TW_INT32 error)
1403{
1404 *((bus_addr_t *)arg) = segs[0].ds_addr;
1405}
1406
1407
1408
/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory.  Bounces any buffer that is not
 *			512-byte aligned/sized, then loads the appropriate
 *			DMA map (ioctl/CCB/plain) and lets the load callback
 *			submit the request.  Requests with no data are
 *			submitted to the Common Layer directly.
 *
 * Input:	req -- ptr to request pkt
 * Output:	None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round the length up to a sg_size_factor multiple. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}
	
		/*
		 * Map the data buffer into bus space and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
			/* CAM CCBs carry their own data description. */
			error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
				req->orig_req, twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}
		
		if (!error)
			/* The callback may have recorded a submit error. */
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				mtx_unlock_spin(sc->io_lock);
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* No data: submit straight to the Common Layer. */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1547
1548
1549
/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request:
 *			post-I/O DMA syncs, map unload, bounce-buffer
 *			copy-back and release.
 *
 * Input:	req -- ptr to request pkt
 * Output:	None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			mtx_unlock_spin(sc->io_lock);
		} else {
			/* Regular I/O: per-request map, no io_lock needed. */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		free(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
1620
1621
1622
1623#ifdef TW_OSL_DEBUG
1624
1625TW_VOID twa_report_stats(TW_VOID);
1626TW_VOID twa_reset_stats(TW_VOID);
1627TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1628TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1629
1630
1631/*
1632 * Function name: twa_report_stats
1633 * Description: For being called from ddb. Calls functions that print
1634 * OSL and CL internal stats for the controller.
1635 *
1636 * Input: None
1637 * Output: None
1638 * Return value: None
1639 */
1640TW_VOID
1641twa_report_stats(TW_VOID)
1642{
1643 struct twa_softc *sc;
1644 TW_INT32 i;
1645
1646 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1647 tw_osli_print_ctlr_stats(sc);
1648 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1649 }
1650}
1651
1652
1653
/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller
 *			stats: current and maximum lengths of the free and
 *			busy request queues.
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	/* Column header, followed by one row per OSL queue. */
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
1674
1675
1676
/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	/* Dump every OSL-side field of the request in one shot. */
	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	/* Then let the Common Layer print its side of the request. */
	tw_cl_print_req_info(&(req->req_handle));
}
1702
1703
1704
1705/*
1706 * Function name: twa_reset_stats
1707 * Description: For being called from ddb.
1708 * Resets some OSL controller stats.
1709 *
1710 * Input: None
1711 * Output: None
1712 * Return value: None
1713 */
1714TW_VOID
1715twa_reset_stats(TW_VOID)
1716{
1717 struct twa_softc *sc;
1718 TW_INT32 i;
1719
1720 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1721 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1722 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1723 tw_cl_reset_stats(&sc->ctlr_handle);
1724 }
1725}
1726
1727#endif /* TW_OSL_DEBUG */
327
328 /* Allocate the PCI register window. */
329 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
330 &bar_num, &bar0_offset, &bar_size))) {
331 tw_osli_printf(sc, "error = %d",
332 TW_CL_SEVERITY_ERROR_STRING,
333 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
334 0x201F,
335 "Can't get PCI BAR info",
336 error);
337 tw_osli_free_resources(sc);
338 return(error);
339 }
340 sc->reg_res_id = PCIR_BARS + bar0_offset;
341 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
342 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
343 == NULL) {
344 tw_osli_printf(sc, "error = %d",
345 TW_CL_SEVERITY_ERROR_STRING,
346 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
347 0x2002,
348 "Can't allocate register window",
349 ENXIO);
350 tw_osli_free_resources(sc);
351 return(ENXIO);
352 }
353 sc->bus_tag = rman_get_bustag(sc->reg_res);
354 sc->bus_handle = rman_get_bushandle(sc->reg_res);
355
356 /* Allocate and register our interrupt. */
357 sc->irq_res_id = 0;
358 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
359 &(sc->irq_res_id), 0, ~0, 1,
360 RF_SHAREABLE | RF_ACTIVE)) == NULL) {
361 tw_osli_printf(sc, "error = %d",
362 TW_CL_SEVERITY_ERROR_STRING,
363 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
364 0x2003,
365 "Can't allocate interrupt",
366 ENXIO);
367 tw_osli_free_resources(sc);
368 return(ENXIO);
369 }
370 if ((error = twa_setup_intr(sc))) {
371 tw_osli_printf(sc, "error = %d",
372 TW_CL_SEVERITY_ERROR_STRING,
373 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
374 0x2004,
375 "Can't set up interrupt",
376 error);
377 tw_osli_free_resources(sc);
378 return(error);
379 }
380
381 if ((error = tw_osli_alloc_mem(sc))) {
382 tw_osli_printf(sc, "error = %d",
383 TW_CL_SEVERITY_ERROR_STRING,
384 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
385 0x2005,
386 "Memory allocation failure",
387 error);
388 tw_osli_free_resources(sc);
389 return(error);
390 }
391
392 /* Initialize the Common Layer for this controller. */
393 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
394 TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
395 sc->non_dma_mem, sc->dma_mem,
396 sc->dma_mem_phys
397 ))) {
398 tw_osli_printf(sc, "error = %d",
399 TW_CL_SEVERITY_ERROR_STRING,
400 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
401 0x2006,
402 "Failed to initialize Common Layer/controller",
403 error);
404 tw_osli_free_resources(sc);
405 return(error);
406 }
407
408 /* Create the control device. */
409 sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev),
410 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
411 "twa%d", device_get_unit(sc->bus_dev));
412 sc->ctrl_dev->si_drv1 = sc;
413
414 if ((error = tw_osli_cam_attach(sc))) {
415 tw_osli_free_resources(sc);
416 tw_osli_printf(sc, "error = %d",
417 TW_CL_SEVERITY_ERROR_STRING,
418 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
419 0x2007,
420 "Failed to initialize CAM",
421 error);
422 return(error);
423 }
424
425 sc->watchdog_index = 0;
426 callout_init(&(sc->watchdog_callout[0]), CALLOUT_MPSAFE);
427 callout_init(&(sc->watchdog_callout[1]), CALLOUT_MPSAFE);
428 callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);
429
430 return(0);
431}
432
433
/*
 * Function name:	twa_watchdog
 * Description:		Periodic callout.  Scans all request slots for a busy
 *			request past its deadline; if one is found, marks the
 *			controller as needing a reset and performs it.
 *			Reschedules itself while the driver is active,
 *			alternating between the two callout slots.
 *
 * Input:		arg -- ptr to the controller handle
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	/* 1234 is a sentinel: "callout_reset() was never invoked below". */
	int				my_watchdog_was_pending = 1234;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	/* Look for any busy request whose deadline has expired. */
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
			(my_req->deadline) &&
			(my_req->deadline < current_time)) {
			/* One expired request is enough to demand a reset. */
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef    TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else  /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	/* Alternate between the two watchdog callout slots. */
	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		/*
		 * Re-arm with a long (70s) period BEFORE resetting, so the
		 * watchdog does not fire again while the reset is running.
		 */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef    TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		/* Normal case: poll again in 5 seconds. */
		my_watchdog_was_pending =
			callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef    TW_OSL_DEBUG
	if (i_need_a_reset || my_watchdog_was_pending)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
		"driver_is_active = %d, my_watchdog_was_pending = %d\n",
		i_need_a_reset, driver_is_active, my_watchdog_was_pending);
#endif /* TW_OSL_DEBUG */
}
495
496
497/*
498 * Function name: tw_osli_alloc_mem
499 * Description: Allocates memory needed both by CL and OSL.
500 *
501 * Input: sc -- OSL internal controller context
502 * Output: None
503 * Return value: 0 -- success
504 * non-zero-- failure
505 */
506static TW_INT32
507tw_osli_alloc_mem(struct twa_softc *sc)
508{
509 struct tw_osli_req_context *req;
510 TW_UINT32 max_sg_elements;
511 TW_UINT32 non_dma_mem_size;
512 TW_UINT32 dma_mem_size;
513 TW_INT32 error;
514 TW_INT32 i;
515
516 tw_osli_dbg_dprintf(3, sc, "entered");
517
518 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
519 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;
520
521 max_sg_elements = (sizeof(bus_addr_t) == 8) ?
522 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;
523
524 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
525 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
526 &(sc->alignment), &(sc->sg_size_factor),
527 &non_dma_mem_size, &dma_mem_size
528 ))) {
529 tw_osli_printf(sc, "error = %d",
530 TW_CL_SEVERITY_ERROR_STRING,
531 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
532 0x2008,
533 "Can't get Common Layer's memory requirements",
534 error);
535 return(error);
536 }
537
538 if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
539 M_WAITOK)) == NULL) {
540 tw_osli_printf(sc, "error = %d",
541 TW_CL_SEVERITY_ERROR_STRING,
542 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
543 0x2009,
544 "Can't allocate non-dma memory",
545 ENOMEM);
546 return(ENOMEM);
547 }
548
549 /* Create the parent dma tag. */
550 if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */
551 sc->alignment, /* alignment */
552 0, /* boundary */
553 BUS_SPACE_MAXADDR, /* lowaddr */
554 BUS_SPACE_MAXADDR, /* highaddr */
555 NULL, NULL, /* filter, filterarg */
556 TW_CL_MAX_IO_SIZE, /* maxsize */
557 max_sg_elements, /* nsegments */
558 TW_CL_MAX_IO_SIZE, /* maxsegsize */
559 0, /* flags */
560 NULL, /* lockfunc */
561 NULL, /* lockfuncarg */
562 &sc->parent_tag /* tag */)) {
563 tw_osli_printf(sc, "error = %d",
564 TW_CL_SEVERITY_ERROR_STRING,
565 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
566 0x200A,
567 "Can't allocate parent DMA tag",
568 ENOMEM);
569 return(ENOMEM);
570 }
571
572 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
573 if (bus_dma_tag_create(sc->parent_tag, /* parent */
574 sc->alignment, /* alignment */
575 0, /* boundary */
576 BUS_SPACE_MAXADDR, /* lowaddr */
577 BUS_SPACE_MAXADDR, /* highaddr */
578 NULL, NULL, /* filter, filterarg */
579 dma_mem_size, /* maxsize */
580 1, /* nsegments */
581 BUS_SPACE_MAXSIZE, /* maxsegsize */
582 0, /* flags */
583 NULL, /* lockfunc */
584 NULL, /* lockfuncarg */
585 &sc->cmd_tag /* tag */)) {
586 tw_osli_printf(sc, "error = %d",
587 TW_CL_SEVERITY_ERROR_STRING,
588 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
589 0x200B,
590 "Can't allocate DMA tag for Common Layer's "
591 "DMA'able memory",
592 ENOMEM);
593 return(ENOMEM);
594 }
595
596 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
597 BUS_DMA_NOWAIT, &sc->cmd_map)) {
598 /* Try a second time. */
599 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
600 BUS_DMA_NOWAIT, &sc->cmd_map)) {
601 tw_osli_printf(sc, "error = %d",
602 TW_CL_SEVERITY_ERROR_STRING,
603 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
604 0x200C,
605 "Can't allocate DMA'able memory for the"
606 "Common Layer",
607 ENOMEM);
608 return(ENOMEM);
609 }
610 }
611
612 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
613 dma_mem_size, twa_map_load_callback,
614 &sc->dma_mem_phys, 0);
615
616 /*
617 * Create a dma tag for data buffers; size will be the maximum
618 * possible I/O size (128kB).
619 */
620 if (bus_dma_tag_create(sc->parent_tag, /* parent */
621 sc->alignment, /* alignment */
622 0, /* boundary */
623 BUS_SPACE_MAXADDR, /* lowaddr */
624 BUS_SPACE_MAXADDR, /* highaddr */
625 NULL, NULL, /* filter, filterarg */
626 TW_CL_MAX_IO_SIZE, /* maxsize */
627 max_sg_elements, /* nsegments */
628 TW_CL_MAX_IO_SIZE, /* maxsegsize */
629 BUS_DMA_ALLOCNOW, /* flags */
630 twa_busdma_lock, /* lockfunc */
631 sc->io_lock, /* lockfuncarg */
632 &sc->dma_tag /* tag */)) {
633 tw_osli_printf(sc, "error = %d",
634 TW_CL_SEVERITY_ERROR_STRING,
635 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
636 0x200F,
637 "Can't allocate DMA tag for data buffers",
638 ENOMEM);
639 return(ENOMEM);
640 }
641
642 /*
643 * Create a dma tag for ioctl data buffers; size will be the maximum
644 * possible I/O size (128kB).
645 */
646 if (bus_dma_tag_create(sc->parent_tag, /* parent */
647 sc->alignment, /* alignment */
648 0, /* boundary */
649 BUS_SPACE_MAXADDR, /* lowaddr */
650 BUS_SPACE_MAXADDR, /* highaddr */
651 NULL, NULL, /* filter, filterarg */
652 TW_CL_MAX_IO_SIZE, /* maxsize */
653 max_sg_elements, /* nsegments */
654 TW_CL_MAX_IO_SIZE, /* maxsegsize */
655 BUS_DMA_ALLOCNOW, /* flags */
656 twa_busdma_lock, /* lockfunc */
657 sc->io_lock, /* lockfuncarg */
658 &sc->ioctl_tag /* tag */)) {
659 tw_osli_printf(sc, "error = %d",
660 TW_CL_SEVERITY_ERROR_STRING,
661 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
662 0x2010,
663 "Can't allocate DMA tag for ioctl data buffers",
664 ENOMEM);
665 return(ENOMEM);
666 }
667
668 /* Create just one map for all ioctl request data buffers. */
669 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
670 tw_osli_printf(sc, "error = %d",
671 TW_CL_SEVERITY_ERROR_STRING,
672 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
673 0x2011,
674 "Can't create ioctl map",
675 ENOMEM);
676 return(ENOMEM);
677 }
678
679
680 /* Initialize request queues. */
681 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
682 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);
683
684 if ((sc->req_ctx_buf = (struct tw_osli_req_context *)
685 malloc((sizeof(struct tw_osli_req_context) *
686 TW_OSLI_MAX_NUM_REQUESTS),
687 TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
688 tw_osli_printf(sc, "error = %d",
689 TW_CL_SEVERITY_ERROR_STRING,
690 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
691 0x2012,
692 "Failed to allocate request packets",
693 ENOMEM);
694 return(ENOMEM);
695 }
696 bzero(sc->req_ctx_buf,
697 sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS);
698
699 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
700 req = &(sc->req_ctx_buf[i]);
701 req->ctlr = sc;
702 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
703 tw_osli_printf(sc, "request # = %d, error = %d",
704 TW_CL_SEVERITY_ERROR_STRING,
705 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
706 0x2013,
707 "Can't create dma map",
708 i, ENOMEM);
709 return(ENOMEM);
710 }
711
712 /* Initialize the ioctl wakeup/ timeout mutex */
713 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
714 mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF);
715
716 /* Insert request into the free queue. */
717 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
718 }
719
720 return(0);
721}
722
723
724
725/*
726 * Function name: tw_osli_free_resources
727 * Description: Performs clean-up at the time of going down.
728 *
729 * Input: sc -- ptr to OSL internal ctlr context
730 * Output: None
731 * Return value: None
732 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	/*
	 * Tear down per-request state (mutex + DMA map) for every request
	 * still on the free queue.  Destroy failures are logged, not fatal:
	 * this routine must continue releasing everything it can.
	 */
	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	/* Unload/free the Common Layer's DMA memory before its tag goes. */
	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	/* Tags are destroyed only after all their maps are gone. */
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	/* The parent tag goes last, after all child tags are destroyed. */
	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
		tw_osli_dbg_dprintf(1, sc,
			"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
823
824
825
826/*
827 * Function name: twa_detach
828 * Description: Called when the controller is being detached from
829 * the pci bus.
830 *
831 * Input: dev -- bus device corresponding to the ctlr
832 * Output: None
833 * Return value: 0 -- success
834 * non-zero-- failure
835 */
836static TW_INT32
837twa_detach(device_t dev)
838{
839 struct twa_softc *sc = device_get_softc(dev);
840 TW_INT32 error;
841
842 tw_osli_dbg_dprintf(3, sc, "entered");
843
844 error = EBUSY;
845 if (sc->open) {
846 tw_osli_printf(sc, "error = %d",
847 TW_CL_SEVERITY_ERROR_STRING,
848 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
849 0x2014,
850 "Device open",
851 error);
852 goto out;
853 }
854
855 /* Shut the controller down. */
856 if ((error = twa_shutdown(dev)))
857 goto out;
858
859 /* Free all resources associated with this controller. */
860 tw_osli_free_resources(sc);
861 error = 0;
862
863out:
864 return(error);
865}
866
867
868
869/*
870 * Function name: twa_shutdown
871 * Description: Called at unload/shutdown time. Lets the controller
872 * know that we are going down.
873 *
874 * Input: dev -- bus device corresponding to the ctlr
875 * Output: None
876 * Return value: 0 -- success
877 * non-zero-- failure
878 */
879static TW_INT32
880twa_shutdown(device_t dev)
881{
882 struct twa_softc *sc = device_get_softc(dev);
883 TW_INT32 error = 0;
884
885 tw_osli_dbg_dprintf(3, sc, "entered");
886
887 /* Disconnect interrupts. */
888 error = twa_teardown_intr(sc);
889
890 /* Stop watchdog task. */
891 callout_drain(&(sc->watchdog_callout[0]));
892 callout_drain(&(sc->watchdog_callout[1]));
893
894 /* Disconnect from the controller. */
895 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
896 tw_osli_printf(sc, "error = %d",
897 TW_CL_SEVERITY_ERROR_STRING,
898 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
899 0x2015,
900 "Failed to shutdown Common Layer/controller",
901 error);
902 }
903 return(error);
904}
905
906
907
908/*
909 * Function name: twa_busdma_lock
910 * Description: Function to provide synchronization during busdma_swi.
911 *
912 * Input: lock_arg -- lock mutex sent as argument
913 * op -- operation (lock/unlock) expected of the function
914 * Output: None
915 * Return value: None
916 */
917TW_VOID
918twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op)
919{
920 struct mtx *lock;
921
922 lock = (struct mtx *)lock_arg;
923 switch (op) {
924 case BUS_DMA_LOCK:
925 mtx_lock_spin(lock);
926 break;
927
928 case BUS_DMA_UNLOCK:
929 mtx_unlock_spin(lock);
930 break;
931
932 default:
933 panic("Unknown operation 0x%x for twa_busdma_lock!", op);
934 }
935}
936
937
938/*
939 * Function name: twa_pci_intr
940 * Description: Interrupt handler. Wrapper for twa_interrupt.
941 *
942 * Input: arg -- ptr to OSL internal ctlr context
943 * Output: None
944 * Return value: None
945 */
946static TW_VOID
947twa_pci_intr(TW_VOID *arg)
948{
949 struct twa_softc *sc = (struct twa_softc *)arg;
950
951 tw_osli_dbg_dprintf(10, sc, "entered");
952 tw_cl_interrupt(&(sc->ctlr_handle));
953}
954
955
956/*
957 * Function name: tw_osli_fw_passthru
958 * Description: Builds a fw passthru cmd pkt, and submits it to CL.
959 *
960 * Input: sc -- ptr to OSL internal ctlr context
961 * buf -- ptr to ioctl pkt understood by CL
962 * Output: None
963 * Return value: 0 -- success
964 * non-zero-- failure
965 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME				end_time;
	TW_UINT32			timeout = 60;	/* seconds per sleep */
	TW_UINT32			data_buf_size_adjusted;
	struct tw_cl_req_packet		*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32			error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");
		
	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size (rounded up to sg_size_factor).
	 */
	data_buf_size_adjusted =
		(user_buf->driver_pkt.buffer_length +
		(sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1);
	if ((req->length = data_buf_size_adjusted)) {
		if ((req->data = malloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) {
			error = ENOMEM;
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2016,
				"Could not alloc mem for "
				"fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		/* Passthru buffers are mapped for both directions. */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	/* Map the buffer and submit; completion will wake us below. */
	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	end_time = tw_osl_get_local_time() + timeout;
	/*
	 * Sleep until the completion callback clears FLAGS_SLEEPING and
	 * wakes us, a timeout fires, or we are interrupted by a signal.
	 */
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		mtx_lock(req->ioctl_wake_timeout_lock);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		mtx_unlock(req->ioctl_wake_timeout_lock);

		/*
		 * If the callback already cleared SLEEPING, the request
		 * completed; ignore whatever mtx_sleep() returned.
		 */
		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (! error) {
			if (((error = req->error_code)) ||
				((error = (req->state !=
				TW_OSLI_REQ_STATE_COMPLETE))) ||
				((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			/*
			 * If the request in fact completed cleanly just as
			 * the timer fired, treat it as a false timeout.
			 */
			if ((!(req->error_code)) &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status)) ) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			/* Genuine timeout: reset unless one is already due. */
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef    TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else  /* TW_OSL_DEBUG */
				device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset. It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal. Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	/* A bus reset during the request is reported to the user as EBUSY. */
	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		free(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}
1135
1136
1137
1138/*
1139 * Function name: tw_osl_complete_passthru
1140 * Description: Called to complete passthru requests.
1141 *
1142 * Input: req_handle -- ptr to request handle
1143 * Output: None
1144 * Return value: None
1145 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	/* Completion for a request we never marked as posted is a bug. */
	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			/* Clear SLEEPING before waking so the sleeper can
			 * tell a real completion from a timeout. */
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before mtx_sleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			/* Anything else: nobody is waiting — log it. */
			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}
1219
1220
1221
1222/*
1223 * Function name: tw_osli_get_request
1224 * Description: Gets a request pkt from the free queue.
1225 *
1226 * Input: sc -- ptr to OSL internal ctlr context
1227 * Output: None
1228 * Return value: ptr to request pkt -- success
1229 * NULL -- failure
1230 */
1231struct tw_osli_req_context *
1232tw_osli_get_request(struct twa_softc *sc)
1233{
1234 struct tw_osli_req_context *req;
1235
1236 tw_osli_dbg_dprintf(4, sc, "entered");
1237
1238 /* Get a free request packet. */
1239 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
1240
1241 /* Initialize some fields to their defaults. */
1242 if (req) {
1243 req->req_handle.osl_req_ctxt = NULL;
1244 req->req_handle.cl_req_ctxt = NULL;
1245 req->req_handle.is_io = 0;
1246 req->data = NULL;
1247 req->length = 0;
1248 req->deadline = 0;
1249 req->real_data = NULL;
1250 req->real_length = 0;
1251 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */
1252 req->flags = 0;
1253 req->error_code = 0;
1254 req->orig_req = NULL;
1255
1256 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));
1257
1258 }
1259 return(req);
1260}
1261
1262
1263
1264/*
1265 * Function name: twa_map_load_data_callback
1266 * Description: Callback of bus_dmamap_load for the buffer associated
1267 * with data. Updates the cmd pkt (size/sgl_entries
1268 * fields, as applicable) to reflect the number of sg
1269 * elements.
1270 *
1271 * Input: arg -- ptr to OSL internal request context
1272 * segs -- ptr to a list of segment descriptors
1273 * nsegments--# of segments
1274 * error -- 0 if no errors encountered before callback,
1275 * non-zero if errors were encountered
1276 * Output: None
1277 * Return value: None
1278 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	/*
	 * EINVAL means the load failed before any mapping happened; record
	 * it and bail without touching the queues or the MAPPED flag.
	 */
	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	/* Too many segments: fail via the common error path below. */
	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		/* Hand the SG list to the CL and submit as a fw passthru. */
		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		/* Hand the SG list to the CL and submit as SCSI I/O. */
		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		/* Complete the request through its registered callback. */
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}
1369
1370
1371
1372/*
1373 * Function name: twa_map_load_callback
1374 * Description: Callback of bus_dmamap_load for the buffer associated
1375 * with a cmd pkt.
1376 *
1377 * Input: arg -- ptr to variable to hold phys addr
1378 * segs -- ptr to a list of segment descriptors
1379 * nsegments--# of segments
1380 * error -- 0 if no errors encountered before callback,
1381 * non-zero if errors were encountered
1382 * Output: None
1383 * Return value: None
1384 */
1385static TW_VOID
1386twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
1387 TW_INT32 nsegments, TW_INT32 error)
1388{
1389 *((bus_addr_t *)arg) = segs[0].ds_addr;
1390}
1391
1392
1393
1394/*
1395 * Function name: tw_osli_map_request
1396 * Description: Maps a cmd pkt and data associated with it, into
1397 * DMA'able memory.
1398 *
1399 * Input: req -- ptr to request pkt
1400 * Output: None
1401 * Return value: 0 -- success
1402 * non-zero-- failure
1403 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			/* Round the length up to a sg_size_factor multiple. */
			req->length = (req->length +
				(sc->sg_size_factor - 1)) &
				~(sc->sg_size_factor - 1);
			req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}
	
		/*
		 * Map the data buffer into bus space, and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			mtx_lock_spin(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			mtx_unlock_spin(sc->io_lock);
		} else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
			/* CAM CCB carried in orig_req; let busdma map it. */
			error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
				req->orig_req, twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}
		
		/* On a synchronous load, any failure was recorded by the
		 * callback in error_code. */
		if (!error)
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				mtx_lock_spin(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				mtx_unlock_spin(sc->io_lock);
				/* Deferred load is not an error to the caller. */
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					free(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* No payload: submit directly, bypassing busdma. */
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}
1532
1533
1534
1535/*
1536 * Function name: tw_osli_unmap_request
1537 * Description: Undoes the mapping done by tw_osli_map_request.
1538 *
1539 * Input: req -- ptr to request pkt
1540 * Output: None
1541 * Return value: None
1542 */
1543TW_VOID
1544tw_osli_unmap_request(struct tw_osli_req_context *req)
1545{
1546 struct twa_softc *sc = req->ctlr;
1547
1548 tw_osli_dbg_dprintf(10, sc, "entered");
1549
1550 /* If the command involved data, unmap that too. */
1551 if (req->data != NULL) {
1552 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
1553 /* Lock against multiple simultaneous ioctl calls. */
1554 mtx_lock_spin(sc->io_lock);
1555
1556 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1557 bus_dmamap_sync(sc->ioctl_tag,
1558 sc->ioctl_map, BUS_DMASYNC_POSTREAD);
1559
1560 /*
1561 * If we are using a bounce buffer, and we are
1562 * reading data, copy the real data in.
1563 */
1564 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1565 bcopy(req->data, req->real_data,
1566 req->real_length);
1567 }
1568
1569 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1570 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
1571 BUS_DMASYNC_POSTWRITE);
1572
1573 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);
1574
1575 mtx_unlock_spin(sc->io_lock);
1576 } else {
1577 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
1578 bus_dmamap_sync(sc->dma_tag,
1579 req->dma_map, BUS_DMASYNC_POSTREAD);
1580
1581 /*
1582 * If we are using a bounce buffer, and we are
1583 * reading data, copy the real data in.
1584 */
1585 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
1586 bcopy(req->data, req->real_data,
1587 req->real_length);
1588 }
1589 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
1590 bus_dmamap_sync(sc->dma_tag, req->dma_map,
1591 BUS_DMASYNC_POSTWRITE);
1592
1593 bus_dmamap_unload(sc->dma_tag, req->dma_map);
1594 }
1595 }
1596
1597 /* Free alignment buffer if it was used. */
1598 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
1599 free(req->data, TW_OSLI_MALLOC_CLASS);
1600 /* Restore original data pointer and length. */
1601 req->data = req->real_data;
1602 req->length = req->real_length;
1603 }
1604}
1605
1606
1607
1608#ifdef TW_OSL_DEBUG
1609
1610TW_VOID twa_report_stats(TW_VOID);
1611TW_VOID twa_reset_stats(TW_VOID);
1612TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc);
1613TW_VOID twa_print_req_info(struct tw_osli_req_context *req);
1614
1615
1616/*
1617 * Function name: twa_report_stats
1618 * Description: For being called from ddb. Calls functions that print
1619 * OSL and CL internal stats for the controller.
1620 *
1621 * Input: None
1622 * Output: None
1623 * Return value: None
1624 */
1625TW_VOID
1626twa_report_stats(TW_VOID)
1627{
1628 struct twa_softc *sc;
1629 TW_INT32 i;
1630
1631 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1632 tw_osli_print_ctlr_stats(sc);
1633 tw_cl_print_ctlr_stats(&sc->ctlr_handle);
1634 }
1635}
1636
1637
1638
1639/*
1640 * Function name: tw_osli_print_ctlr_stats
1641 * Description: For being called from ddb. Prints OSL controller stats
1642 *
1643 * Input: sc -- ptr to OSL internal controller context
1644 * Output: None
1645 * Return value: None
1646 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	/* Identify which controller context these stats belong to. */
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	/* Column header for the per-queue figures below. */
	twa_printf(sc, "OSLq type current max\n");
	/* Free queue: current length and maximum length reached. */
	twa_printf(sc, "free %04d %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	/* Busy queue: current length and maximum length reached. */
	twa_printf(sc, "busy %04d %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
1659
1660
1661
1662/*
1663 * Function name: twa_print_req_info
1664 * Description: For being called from ddb. Calls functions that print
1665 * OSL and CL internal details for the request.
1666 *
1667 * Input: req -- ptr to OSL internal request context
1668 * Output: None
1669 * Return value: None
1670 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc *sc = req->ctlr;

	twa_printf(sc, "OSL details for request:\n");
	/*
	 * Emit all OSL-level request fields in a single twa_printf call:
	 * handle contexts, data pointers/lengths (including the pre-bounce
	 * "real" values), state/flags/error, the originating request, the
	 * queue linkage, and the DMA map.
	 */
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	/* Delegate the common-layer (CL) request details to the CL printer. */
	tw_cl_print_req_info(&(req->req_handle));
}
1687
1688
1689
1690/*
1691 * Function name: twa_reset_stats
1692 * Description: For being called from ddb.
1693 * Resets some OSL controller stats.
1694 *
1695 * Input: None
1696 * Output: None
1697 * Return value: None
1698 */
1699TW_VOID
1700twa_reset_stats(TW_VOID)
1701{
1702 struct twa_softc *sc;
1703 TW_INT32 i;
1704
1705 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
1706 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
1707 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
1708 tw_cl_reset_stats(&sc->ctlr_handle);
1709 }
1710}
1711
1712#endif /* TW_OSL_DEBUG */