tws.c (240900 → 241753)
1/*
2 * Copyright (c) 2010, LSI Corp.
3 * All rights reserved.
4 * Author : Manjunath Ranganathaiah
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/dev/tws/tws.c 241753 2012-10-19 22:07:40Z delphij $");
37
38#include <dev/tws/tws.h>
39#include <dev/tws/tws_services.h>
40#include <dev/tws/tws_hdm.h>
41
42#include <cam/cam.h>
43#include <cam/cam_ccb.h>
44
45MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
46int tws_queue_depth = TWS_MAX_REQS;
47int tws_enable_msi = 0;
48int tws_enable_msix = 0;
49
50
51
52/* externs */
53extern int tws_cam_attach(struct tws_softc *sc);
54extern void tws_cam_detach(struct tws_softc *sc);
55extern int tws_init_ctlr(struct tws_softc *sc);
56extern boolean tws_ctlr_ready(struct tws_softc *sc);
57extern void tws_turn_off_interrupts(struct tws_softc *sc);
58extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
59 u_int8_t q_type );
60extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
61 struct tws_request *req, u_int8_t q_type );
62extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
63 u_int8_t q_type );
64extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
65extern boolean tws_ctlr_reset(struct tws_softc *sc);
66extern void tws_intr(void *arg);
67extern int tws_use_32bit_sgls;
68
69
70struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
71int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
72void tws_send_event(struct tws_softc *sc, u_int8_t event);
73uint8_t tws_get_state(struct tws_softc *sc);
74void tws_release_request(struct tws_request *req);
75
76
77
78/* Function prototypes */
79static d_open_t tws_open;
80static d_close_t tws_close;
81static d_read_t tws_read;
82static d_write_t tws_write;
83extern d_ioctl_t tws_ioctl;
84
85static int tws_init(struct tws_softc *sc);
86static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
87 int nseg, int error);
88
89static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
90static int tws_init_aen_q(struct tws_softc *sc);
91static int tws_init_trace_q(struct tws_softc *sc);
92static int tws_setup_irq(struct tws_softc *sc);
93int tws_setup_intr(struct tws_softc *sc, int irqs);
94int tws_teardown_intr(struct tws_softc *sc);
95
96
97/* Character device entry points */
98
99static struct cdevsw tws_cdevsw = {
100 .d_version = D_VERSION,
101 .d_open = tws_open,
102 .d_close = tws_close,
103 .d_read = tws_read,
104 .d_write = tws_write,
105 .d_ioctl = tws_ioctl,
106 .d_name = "tws",
107};
108
109/*
110 * In the cdevsw routines, we find our softc by using the si_drv1 member
111 * of struct cdev. We set this variable to point to our softc in our
112 * attach routine when we create the /dev entry.
113 */
114
115int
116tws_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
117{
118 struct tws_softc *sc = dev->si_drv1;
119
120 if ( sc )
121 TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
122 return (0);
123}
124
125int
126tws_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
127{
128 struct tws_softc *sc = dev->si_drv1;
129
130 if ( sc )
131 TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
132 return (0);
133}
134
135int
136tws_read(struct cdev *dev, struct uio *uio, int ioflag)
137{
138 struct tws_softc *sc = dev->si_drv1;
139
140 if ( sc )
141 TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
142 return (0);
143}
144
145int
146tws_write(struct cdev *dev, struct uio *uio, int ioflag)
147{
148 struct tws_softc *sc = dev->si_drv1;
149
150 if ( sc )
151 TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
152 return (0);
153}
154
155/* PCI Support Functions */
156
157/*
158 * Compare the device ID of this device against the IDs that this driver
159 * supports. If there is a match, set the description and return success.
160 */
161static int
162tws_probe(device_t dev)
163{
164 static u_int8_t first_ctlr = 1;
165
166 if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
167 (pci_get_device(dev) == TWS_DEVICE_ID)) {
168 device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
169 if (first_ctlr) {
170 printf("LSI 3ware device driver for SAS/SATA storage "
171 "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
172 first_ctlr = 0;
173 }
174
175 return(BUS_PROBE_DEFAULT);
176 }
177 return (ENXIO);
178}
179
180/* Attach function is only called if the probe is successful. */
181
182static int
183tws_attach(device_t dev)
184{
185 struct tws_softc *sc = device_get_softc(dev);
186 u_int32_t cmd, bar;
187 int error=0,i;
188
189 /* no tracing yet */
190 /* Look up our softc and initialize its fields. */
191 sc->tws_dev = dev;
192 sc->device_id = pci_get_device(dev);
193 sc->subvendor_id = pci_get_subvendor(dev);
194 sc->subdevice_id = pci_get_subdevice(dev);
195
196 /* Initialize mutexes */
197 mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
198 mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF);
199 mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF);
200 mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
201
202 if ( tws_init_trace_q(sc) == FAILURE )
203 printf("trace init failure\n");
204 /* send init event */
205 mtx_lock(&sc->gen_lock);
206 tws_send_event(sc, TWS_INIT_START);
207 mtx_unlock(&sc->gen_lock);
208
209
210#if _BYTE_ORDER == _BIG_ENDIAN
211 TWS_TRACE(sc, "BIG endian", 0, 0);
212#endif
213 /* sysctl context setup */
214 sysctl_ctx_init(&sc->tws_clist);
215 sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
216 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
217 device_get_nameunit(dev),
218 CTLFLAG_RD, 0, "");
219 if ( sc->tws_oidp == NULL ) {
220 tws_log(sc, SYSCTL_TREE_NODE_ADD);
221 goto attach_fail_1;
222 }
223 SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
224 OID_AUTO, "driver_version", CTLFLAG_RD,
225 TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");
226
227 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
228 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
229 tws_log(sc, PCI_COMMAND_READ);
230 goto attach_fail_1;
231 }
232 /* Force the busmaster enable bit on. */
233 cmd |= PCIM_CMD_BUSMASTEREN;
234 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
235
236 bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
237 TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
238 bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
239 bar = bar & ~TWS_BIT2;
240 TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);
241
242 /* The MFA base address comes from the BAR2 register and is used for
243 * push mode. Firmware will eventually move to pull mode, at which
244 * point this needs to change.
245 */
246#ifndef TWS_PULL_MODE_ENABLE
247 sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
248 sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
249 TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
250#endif
251
252 /* allocate MMIO register space */
253 sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
254 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
255 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
256 == NULL) {
257 tws_log(sc, ALLOC_MEMORY_RES);
258 goto attach_fail_1;
259 }
260 sc->bus_tag = rman_get_bustag(sc->reg_res);
261 sc->bus_handle = rman_get_bushandle(sc->reg_res);
262
263#ifndef TWS_PULL_MODE_ENABLE
264 /* Allocate bus space for inbound mfa */
265 sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
266 if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
267 &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
268 == NULL) {
269 tws_log(sc, ALLOC_MEMORY_RES);
270 goto attach_fail_2;
271 }
272 sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
273 sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
274#endif
275
276 /* Allocate and register our interrupt. */
277 sc->intr_type = TWS_INTx; /* default */
278
279 if ( tws_enable_msi )
280 sc->intr_type = TWS_MSI;
281 if ( tws_setup_irq(sc) == FAILURE ) {
282 tws_log(sc, ALLOC_MEMORY_RES);
283 goto attach_fail_3;
284 }
285
286 /*
287 * Create a /dev entry for this device. The kernel will assign us
288 * a major number automatically. We use the unit number of this
289 * device as the minor number and name the character device
290 * "tws<unit>".
291 */
292 sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
293 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
294 device_get_unit(dev));
295 sc->tws_cdev->si_drv1 = sc;
296
297 if ( tws_init(sc) == FAILURE ) {
298 tws_log(sc, TWS_INIT_FAILURE);
299 goto attach_fail_4;
300 }
301 if ( tws_init_ctlr(sc) == FAILURE ) {
302 tws_log(sc, TWS_CTLR_INIT_FAILURE);
303 goto attach_fail_4;
304 }
305 if ((error = tws_cam_attach(sc))) {
306 tws_log(sc, TWS_CAM_ATTACH);
307 goto attach_fail_4;
308 }
309 /* send init complete event */
310 mtx_lock(&sc->gen_lock);
311 tws_send_event(sc, TWS_INIT_COMPLETE);
312 mtx_unlock(&sc->gen_lock);
313
314 TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
315 return(0);
316
317attach_fail_4:
318 tws_teardown_intr(sc);
319 destroy_dev(sc->tws_cdev);
320attach_fail_3:
321 for(i=0;i<sc->irqs;i++) {
322 if ( sc->irq_res[i] ){
323 if (bus_release_resource(sc->tws_dev,
324 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
325 TWS_TRACE(sc, "bus irq res", 0, 0);
326 }
327 }
328#ifndef TWS_PULL_MODE_ENABLE
329attach_fail_2:
330#endif
331 if ( sc->mfa_res ){
332 if (bus_release_resource(sc->tws_dev,
333 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
334 TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
335 }
336 if ( sc->reg_res ){
337 if (bus_release_resource(sc->tws_dev,
338 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
339 TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
340 }
341attach_fail_1:
342 mtx_destroy(&sc->q_lock);
343 mtx_destroy(&sc->sim_lock);
344 mtx_destroy(&sc->gen_lock);
345 mtx_destroy(&sc->io_lock);
346 sysctl_ctx_free(&sc->tws_clist);
347 return (ENXIO);
348}
349
350/* Detach device. */
351
352static int
353tws_detach(device_t dev)
354{
355 struct tws_softc *sc = device_get_softc(dev);
356 int i;
357 u_int32_t reg;
358
359 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
360
361 mtx_lock(&sc->gen_lock);
362 tws_send_event(sc, TWS_UNINIT_START);
363 mtx_unlock(&sc->gen_lock);
364
365 /* Interrupts need to be disabled before detaching from CAM. */
366 tws_turn_off_interrupts(sc);
367 /* clear the doorbell */
368 tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
369 reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
370 TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
371 sc->obfl_q_overrun = false;
372 tws_init_connect(sc, 1);
373
374 /* Tear down the state in our softc that was created in the attach routine. */
375 /* Disconnect the interrupt handler. */
376 tws_teardown_intr(sc);
377
378 /* Release irq resource */
379 for(i=0;i<sc->irqs;i++) {
380 if ( sc->irq_res[i] ){
381 if (bus_release_resource(sc->tws_dev,
382 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
383 TWS_TRACE(sc, "bus release irq resource",
384 i, sc->irq_res_id[i]);
385 }
386 }
387 if ( sc->intr_type == TWS_MSI ) {
388 pci_release_msi(sc->tws_dev);
389 }
390
391 tws_cam_detach(sc);
392
393 /* Release memory resource */
394 if ( sc->mfa_res ){
395 if (bus_release_resource(sc->tws_dev,
396 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
397 TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
398 }
399 if ( sc->reg_res ){
400 if (bus_release_resource(sc->tws_dev,
401 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
402 TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
403 }
404
405 free(sc->reqs, M_TWS);
406 free(sc->sense_bufs, M_TWS);
407 free(sc->scan_ccb, M_TWS);
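/* r241753: release the ioctl data buffer allocated in tws_init(). */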
408 if (sc->ioctl_data_mem)
409 bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
410 free(sc->aen_q.q, M_TWS);
411 free(sc->trace_q.q, M_TWS);
412 mtx_destroy(&sc->q_lock);
413 mtx_destroy(&sc->sim_lock);
414 mtx_destroy(&sc->gen_lock);
415 mtx_destroy(&sc->io_lock);
416 destroy_dev(sc->tws_cdev);
417 sysctl_ctx_free(&sc->tws_clist);
418 return (0);
419}
420
421int
422tws_setup_intr(struct tws_softc *sc, int irqs)
423{
424 int i, error;
425
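/* bus_setup_intr() on FreeBSD 7.0 and later takes an extra filter argument;
 * passing NULL registers tws_intr() as a threaded handler only. */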
426 for(i=0;i<irqs;i++) {
427 if (!(sc->intr_handle[i])) {
428 if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
429 INTR_TYPE_CAM | INTR_MPSAFE,
430#if (__FreeBSD_version >= 700000)
431 NULL,
432#endif
433 tws_intr, sc, &sc->intr_handle[i]))) {
434 tws_log(sc, SETUP_INTR_RES);
435 return(FAILURE);
436 }
437 }
438 }
439 return(SUCCESS);
440
441}
442
443
444int
445tws_teardown_intr(struct tws_softc *sc)
446{
447 int i, error;
448
449 for(i=0;i<sc->irqs;i++) {
450 if (sc->intr_handle[i]) {
451 error = bus_teardown_intr(sc->tws_dev,
452 sc->irq_res[i], sc->intr_handle[i]);
453 sc->intr_handle[i] = NULL;
454 }
455 }
456 return(SUCCESS);
457}
458
459
460static int
461tws_setup_irq(struct tws_softc *sc)
462{
463 int messages;
464 u_int16_t cmd;
465
466 cmd = pci_read_config(sc->tws_dev, PCIR_COMMAND, 2);
467 switch(sc->intr_type) {
468 case TWS_INTx :
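/* Clear the INTx-disable bit (0x0400) in the PCI command register so
 * legacy line interrupts can be delivered. */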
469 cmd = cmd & ~0x0400;
470 pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2);
471 sc->irqs = 1;
472 sc->irq_res_id[0] = 0;
473 sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
474 &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
475 if ( ! sc->irq_res[0] )
476 return(FAILURE);
477 if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
478 return(FAILURE);
479 device_printf(sc->tws_dev, "Using legacy INTx\n");
480 break;
481 case TWS_MSI :
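/* Disable legacy INTx delivery while MSI is in use. */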
482 cmd = cmd | 0x0400;
483 pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2);
484 sc->irqs = 1;
485 sc->irq_res_id[0] = 1;
486 messages = 1;
487 if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
488 TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
489 return(FAILURE);
490 }
491 sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
492 &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
493
494 if ( !sc->irq_res[0] )
495 return(FAILURE);
496 if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
497 return(FAILURE);
498 device_printf(sc->tws_dev, "Using MSI\n");
499 break;
500
501 }
502
503 return(SUCCESS);
504}
505
506static int
507tws_init(struct tws_softc *sc)
508{
509
510 u_int32_t max_sg_elements;
511 u_int32_t dma_mem_size;
512 int error;
513 u_int32_t reg;
514
515 sc->seq_id = 0;
516 if ( tws_queue_depth > TWS_MAX_REQS )
517 tws_queue_depth = TWS_MAX_REQS;
518 if (tws_queue_depth < TWS_RESERVED_REQS+1)
519 tws_queue_depth = TWS_RESERVED_REQS+1;
520 sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
521 max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
522 TWS_MAX_64BIT_SG_ELEMENTS :
523 TWS_MAX_32BIT_SG_ELEMENTS;
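/* One command packet per request plus one extra sector, which appears to
 * serve as slack for the 16-byte packet alignment used below. */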
524 dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
525 (TWS_SECTOR_SIZE) ;
526 if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
527 TWS_ALIGNMENT, /* alignment */
528 0, /* boundary */
529 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
530 BUS_SPACE_MAXADDR, /* highaddr */
531 NULL, NULL, /* filter, filterarg */
532 BUS_SPACE_MAXSIZE, /* maxsize */
533 max_sg_elements, /* numsegs */
534 BUS_SPACE_MAXSIZE, /* maxsegsize */
535 0, /* flags */
536 NULL, NULL, /* lockfunc, lockfuncarg */
537 &sc->parent_tag /* tag */
538 )) {
539 TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
540 sc->is64bit);
541 return(ENOMEM);
542 }
543 /* Inbound message frames require 16-byte alignment.
544 * Outbound MFs can live with 4-byte alignment - for now just
545 * use 16 for both.
546 */
547 if ( bus_dma_tag_create(sc->parent_tag, /* parent */
548 TWS_IN_MF_ALIGNMENT, /* alignment */
549 0, /* boundary */
550 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
551 BUS_SPACE_MAXADDR, /* highaddr */
552 NULL, NULL, /* filter, filterarg */
553 dma_mem_size, /* maxsize */
554 1, /* numsegs */
555 BUS_SPACE_MAXSIZE, /* maxsegsize */
556 0, /* flags */
557 NULL, NULL, /* lockfunc, lockfuncarg */
558 &sc->cmd_tag /* tag */
559 )) {
560 TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
561 return(ENOMEM);
562 }
563
564 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
565 BUS_DMA_NOWAIT, &sc->cmd_map)) {
566 TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
567 return(ENOMEM);
568 }
569
570 /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
571 sc->dma_mem_phys=0;
572 error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
573 dma_mem_size, tws_dmamap_cmds_load_cbfn,
574 &sc->dma_mem_phys, 0);
575
576 /*
577 * Create a dma tag for data buffers; size will be the maximum
578 * possible I/O size (128kB).
579 */
580 if (bus_dma_tag_create(sc->parent_tag, /* parent */
581 TWS_ALIGNMENT, /* alignment */
582 0, /* boundary */
583 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
584 BUS_SPACE_MAXADDR, /* highaddr */
585 NULL, NULL, /* filter, filterarg */
586 TWS_MAX_IO_SIZE, /* maxsize */
587 max_sg_elements, /* nsegments */
588 TWS_MAX_IO_SIZE, /* maxsegsize */
589 BUS_DMA_ALLOCNOW, /* flags */
590 busdma_lock_mutex, /* lockfunc */
591 &sc->io_lock, /* lockfuncarg */
592 &sc->data_tag /* tag */)) {
593 TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
594 return(ENOMEM);
595 }
596
597 sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
598 M_WAITOK | M_ZERO);
599 if ( sc->reqs == NULL ) {
600 TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
601 return(ENOMEM);
602 }
603 sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
604 M_WAITOK | M_ZERO);
605 if ( sc->sense_bufs == NULL ) {
606 TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit);
607 return(ENOMEM);
608 }
609 sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
610 if ( sc->scan_ccb == NULL ) {
611 TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit);
612 return(ENOMEM);
613 }
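/* r241753: pre-allocate a zeroed DMA buffer for ioctl data up front;
 * it is released in tws_detach(). */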
614 if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
615 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
616 device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
617 return(ENOMEM);
618 }
619
620 if ( !tws_ctlr_ready(sc) )
621 if( !tws_ctlr_reset(sc) )
622 return(FAILURE);
623
624 bzero(&sc->stats, sizeof(struct tws_stats));
625 tws_init_qs(sc);
626 tws_turn_off_interrupts(sc);
627
628 /*
629 * Enable pull mode by setting bit 1.
630 * Setting bit 0 to 1 would enable interrupt coalescing;
631 * will revisit.
632 */
633
634#ifdef TWS_PULL_MODE_ENABLE
635
636 reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
637 TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
638 tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);
639
640#endif
641
642 TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
643 if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
644 return(FAILURE);
645 if ( tws_init_aen_q(sc) == FAILURE )
646 return(FAILURE);
647
648 return(SUCCESS);
649
650}
651
652static int
653tws_init_aen_q(struct tws_softc *sc)
654{
655 sc->aen_q.head=0;
656 sc->aen_q.tail=0;
657 sc->aen_q.depth=256;
658 sc->aen_q.overflow=0;
659 sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
660 M_TWS, M_WAITOK | M_ZERO);
661 if ( ! sc->aen_q.q )
662 return(FAILURE);
663 return(SUCCESS);
664}
665
666static int
667tws_init_trace_q(struct tws_softc *sc)
668{
669 sc->trace_q.head=0;
670 sc->trace_q.tail=0;
671 sc->trace_q.depth=256;
672 sc->trace_q.overflow=0;
673 sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
674 M_TWS, M_WAITOK | M_ZERO);
675 if ( ! sc->trace_q.q )
676 return(FAILURE);
677 return(SUCCESS);
678}
679
680static int
681tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
682{
683
684 struct tws_command_packet *cmd_buf;
685 cmd_buf = (struct tws_command_packet *)sc->dma_mem;
686 int i;
687
688 bzero(cmd_buf, dma_mem_size);
689 TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
690 mtx_lock(&sc->q_lock);
691 for ( i=0; i< tws_queue_depth; i++)
692 {
693 if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
694 /* log an ENOMEM failure msg here */
695 mtx_unlock(&sc->q_lock);
696 return(FAILURE);
697 }
698 sc->reqs[i].cmd_pkt = &cmd_buf[i];
699
700 sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
701 sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
702 (i * sizeof(struct tws_command_packet));
703
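/* The command packet's bus address skips the command header, while the
 * sense buffer's hdr_pkt_phy above points at that header. */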
704 sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
705 sizeof(struct tws_command_header) +
706 (i * sizeof(struct tws_command_packet));
707 sc->reqs[i].request_id = i;
708 sc->reqs[i].sc = sc;
709
710 sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;
711
712 sc->reqs[i].state = TWS_REQ_STATE_FREE;
713 if ( i >= TWS_RESERVED_REQS )
714 tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
715 }
716 mtx_unlock(&sc->q_lock);
717 return(SUCCESS);
718}
719
720static void
721tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
722 int nseg, int error)
723{
724
725 /* printf("command load done \n"); */
726
727 *((bus_addr_t *)arg) = segs[0].ds_addr;
728}
729
730void
731tws_send_event(struct tws_softc *sc, u_int8_t event)
732{
733 mtx_assert(&sc->gen_lock, MA_OWNED);
734 TWS_TRACE_DEBUG(sc, "received event ", 0, event);
735 switch (event) {
736
737 case TWS_INIT_START:
738 sc->tws_state = TWS_INIT;
739 break;
740
741 case TWS_INIT_COMPLETE:
742 if (sc->tws_state != TWS_INIT) {
743 device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
744 } else {
745 sc->tws_state = TWS_ONLINE;
746 }
747 break;
748
749 case TWS_RESET_START:
750 /* We can transition to the reset state from any state except reset. */
751 if (sc->tws_state != TWS_RESET) {
752 sc->tws_prev_state = sc->tws_state;
753 sc->tws_state = TWS_RESET;
754 }
755 break;
756
757 case TWS_RESET_COMPLETE:
758 if (sc->tws_state != TWS_RESET) {
759 device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
760 } else {
761 sc->tws_state = sc->tws_prev_state;
762 }
763 break;
764
765 case TWS_SCAN_FAILURE:
766 if (sc->tws_state != TWS_ONLINE) {
767 device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
768 } else {
769 sc->tws_state = TWS_OFFLINE;
770 }
771 break;
772
773 case TWS_UNINIT_START:
774 if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
775 device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
776 } else {
777 sc->tws_state = TWS_UNINIT;
778 }
779 break;
780 }
781
782}
783
784uint8_t
785tws_get_state(struct tws_softc *sc)
786{
787
788 return((u_int8_t)sc->tws_state);
789
790}
791
792/* Called during system shutdown after sync. */
793
794static int
795tws_shutdown(device_t dev)
796{
797
798 struct tws_softc *sc = device_get_softc(dev);
799
800 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
801
802 tws_turn_off_interrupts(sc);
803 tws_init_connect(sc, 1);
804
805 return (0);
806}
807
808/*
809 * Device suspend routine.
810 */
811static int
812tws_suspend(device_t dev)
813{
814 struct tws_softc *sc = device_get_softc(dev);
815
816 if ( sc )
817 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
818 return (0);
819}
820
821/*
822 * Device resume routine.
823 */
824static int
825tws_resume(device_t dev)
826{
827
828 struct tws_softc *sc = device_get_softc(dev);
829
830 if ( sc )
831 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
832 return (0);
833}
834
835
836struct tws_request *
837tws_get_request(struct tws_softc *sc, u_int16_t type)
838{
839 struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
840 struct tws_request *r = NULL;
841
842 mtx_lock(my_mutex);
843
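/* SCSI I/O requests come from the free queue; any other request type is
 * used directly as an index into the reserved slots at the front of
 * sc->reqs (slots below TWS_RESERVED_REQS never enter the free queue). */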
844 if (type == TWS_REQ_TYPE_SCSI_IO) {
845 r = tws_q_remove_head(sc, TWS_FREE_Q);
846 } else {
847 if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
848 r = &sc->reqs[type];
849 }
850 }
851
852 if ( r ) {
853 bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
854 r->data = NULL;
855 r->length = 0;
856 r->type = type;
857 r->flags = TWS_DIR_UNKNOWN;
858 r->error_code = TWS_REQ_RET_INVALID;
859 r->cb = NULL;
860 r->ccb_ptr = NULL;
861 r->thandle.callout = NULL;
862 r->next = r->prev = NULL;
863
864 r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
865 }
866
867 mtx_unlock(my_mutex);
868
869 return(r);
870}
871
872void
873tws_release_request(struct tws_request *req)
874{
875
876 struct tws_softc *sc = req->sc;
877
878 TWS_TRACE_DEBUG(sc, "entry", sc, 0);
879 mtx_lock(&sc->q_lock);
880 tws_q_insert_tail(sc, req, TWS_FREE_Q);
881 mtx_unlock(&sc->q_lock);
882}
883
884static device_method_t tws_methods[] = {
885 /* Device interface */
886 DEVMETHOD(device_probe, tws_probe),
887 DEVMETHOD(device_attach, tws_attach),
888 DEVMETHOD(device_detach, tws_detach),
889 DEVMETHOD(device_shutdown, tws_shutdown),
890 DEVMETHOD(device_suspend, tws_suspend),
891 DEVMETHOD(device_resume, tws_resume),
892
893 DEVMETHOD_END
894};
895
896static driver_t tws_driver = {
897 "tws",
898 tws_methods,
899 sizeof(struct tws_softc)
900};
901
902
903static devclass_t tws_devclass;
904
905/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
906DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
907MODULE_DEPEND(tws, cam, 1, 1, 1);
908MODULE_DEPEND(tws, pci, 1, 1, 1);
909
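/* Loader tunables: hw.tws.queue_depth and hw.tws.enable_msi can be set in
 * loader.conf to override the defaults above at boot. */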
910TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
911TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);