/*
 * Copyright (c) 2010, LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah
 * Support: freebsdraid@lsi.com
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/tws/tws.c 331722 2018-03-29 02:50:57Z eadler $");

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
int tws_queue_depth = TWS_MAX_REQS;
int tws_enable_msi = 0;
int tws_enable_msix = 0;


/* externs */
extern int tws_cam_attach(struct tws_softc *sc);
extern void tws_cam_detach(struct tws_softc *sc);
extern int tws_init_ctlr(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
                                   struct tws_request *req, u_int8_t q_type );
extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
                                                       u_int8_t q_type );
extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern void tws_intr(void *arg);
extern int tws_use_32bit_sgls;


struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
void tws_send_event(struct tws_softc *sc, u_int8_t event);
uint8_t tws_get_state(struct tws_softc *sc);
void tws_release_request(struct tws_request *req);


/* Function prototypes */
static d_open_t     tws_open;
static d_close_t    tws_close;
static d_read_t     tws_read;
static d_write_t    tws_write;
extern d_ioctl_t    tws_ioctl;

static int tws_init(struct tws_softc *sc);
static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error);

static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
static int tws_init_aen_q(struct tws_softc *sc);
static int tws_init_trace_q(struct tws_softc *sc);
static int tws_setup_irq(struct tws_softc *sc);
int tws_setup_intr(struct tws_softc *sc, int irqs);
int tws_teardown_intr(struct tws_softc *sc);


/* Character device entry points */

static struct cdevsw tws_cdevsw = {
    .d_version =    D_VERSION,
    .d_open =   tws_open,
    .d_close =  tws_close,
    .d_read =   tws_read,
    .d_write =  tws_write,
    .d_ioctl =  tws_ioctl,
    .d_name =   "tws",
};

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member
 * of struct cdev.  We set this variable to point to our softc in our
 * attach routine when we create the /dev entry.
 */

int
tws_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
    return (0);
}

int
tws_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
    return (0);
}

int
tws_read(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

int
tws_write(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct tws_softc *sc = dev->si_drv1;

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
    return (0);
}

/* PCI Support Functions */

/*
 * Compare the device ID of this device against the IDs that this driver
 * supports.  If there is a match, set the description and return success.
 */
static int
tws_probe(device_t dev)
{
    static u_int8_t first_ctlr = 1;

    if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
        (pci_get_device(dev) == TWS_DEVICE_ID)) {
        device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
        if (first_ctlr) {
            printf("LSI 3ware device driver for SAS/SATA storage "
                    "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
            first_ctlr = 0;
        }

        return(BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/* Attach function is only called if the probe is successful. */

static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
    mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
    mtx_init( &sc->io_lock,  "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);
    callout_init(&sc->stats_timer, 1);

    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
                                   SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
                                   device_get_nameunit(dev),
                                   CTLFLAG_RD, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
                      OID_AUTO, "driver_version", CTLFLAG_RD,
                      TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    pci_enable_busmaster(dev);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

    /* The MFA base address comes from the BAR2 register and is used for
     * push mode.  Firmware will eventually move to pull mode, at which
     * point this will need to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

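/*
 * Error unwind: the attach_fail_* labels release resources in the reverse
 * order of their allocation above.
 */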
attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
    if (sc->dma_mem_phys)
            bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
            bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
            bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}

/* Detach device. */

static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* Interrupts must be disabled before detaching from CAM. */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource",
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    if (sc->dma_mem_phys)
            bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
    if (sc->dma_mem)
            bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map);
    if (sc->cmd_tag)
            bus_dma_tag_destroy(sc->cmd_tag);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    for ( i=0; i< tws_queue_depth; i++) {
            if (sc->reqs[i].dma_map)
                    bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
            callout_drain(&sc->reqs[i].timeout);
    }

    callout_drain(&sc->stats_timer);
    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    if (sc->ioctl_data_mem)
            bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    if (sc->data_tag)
            bus_dma_tag_destroy(sc->data_tag);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}

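/*
 * Hook up the interrupt handler (tws_intr) for each allocated IRQ resource
 * that does not already have a handler installed.
 */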
int
tws_setup_intr(struct tws_softc *sc, int irqs)
{
    int i, error;

    for(i=0;i<irqs;i++) {
        if (!(sc->intr_handle[i])) {
            if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
                                    INTR_TYPE_CAM | INTR_MPSAFE,
#if (__FreeBSD_version >= 700000)
                                    NULL,
#endif
                                    tws_intr, sc, &sc->intr_handle[i]))) {
                tws_log(sc, SETUP_INTR_RES);
                return(FAILURE);
            }
        }
    }
    return(SUCCESS);

}

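/* Remove any installed interrupt handlers; the IRQ resources themselves
 * are released by the caller.
 */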
int
tws_teardown_intr(struct tws_softc *sc)
{
    int i, error;

    for(i=0;i<sc->irqs;i++) {
        if (sc->intr_handle[i]) {
            error = bus_teardown_intr(sc->tws_dev,
                                      sc->irq_res[i], sc->intr_handle[i]);
            sc->intr_handle[i] = NULL;
        }
    }
    return(SUCCESS);
}

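/*
 * Allocate IRQ resources and install the handler, using either legacy INTx
 * or a single MSI vector depending on sc->intr_type.
 */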
static int
tws_setup_irq(struct tws_softc *sc)
{
    int messages;

    switch(sc->intr_type) {
        case TWS_INTx :
            sc->irqs = 1;
            sc->irq_res_id[0] = 0;
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                            &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
            if ( ! sc->irq_res[0] )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using legacy INTx\n");
            break;
        case TWS_MSI :
            sc->irqs = 1;
            sc->irq_res_id[0] = 1;
            messages = 1;
            if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
                TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
                return(FAILURE);
            }
            sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
                              &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);

            if ( !sc->irq_res[0]  )
                return(FAILURE);
            if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
                return(FAILURE);
            device_printf(sc->tws_dev, "Using MSI\n");
            break;

    }

    return(SUCCESS);
}

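/*
 * One-time soft state setup: create the parent, command and data DMA tags,
 * allocate the command packet area and per-request structures, reset the
 * controller if it is not ready, and initialize the request and AEN queues.
 */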
static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
                                 TWS_MAX_64BIT_SG_ELEMENTS :
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE);
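    /*
     * Three bus_dma tags are used: parent_tag is the parent of the other
     * two, cmd_tag covers the contiguous command packet area loaded below,
     * and data_tag is created later for per-request data buffers.
     */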
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,       /* parent */
                            TWS_IN_MF_ALIGNMENT,  /* alignment */
                            0,                    /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,    /* highaddr */
                            NULL, NULL,           /* filter, filterarg */
                            dma_mem_size,         /* maxsize */
                            1,                    /* numsegs */
                            BUS_SPACE_MAXSIZE,    /* maxsegsize */
                            0,                    /* flags */
                            NULL, NULL,           /* lockfunc, lockfuncarg */
                            &sc->cmd_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

    /*
     * Create a dma tag for data buffers; size will be the maximum
     * possible I/O size (128kB).
     */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA data tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->reqs == NULL ) {
        TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->sense_bufs == NULL ) {
        TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
    if ( sc->scan_ccb == NULL ) {
        TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);

    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.
     * Setting bit 0 to 1 will enable interrupt coalescing;
     * will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);

}

static int
tws_init_aen_q(struct tws_softc *sc)
{
    sc->aen_q.head=0;
    sc->aen_q.tail=0;
    sc->aen_q.depth=256;
    sc->aen_q.overflow=0;
    sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    if ( ! sc->aen_q.q )
        return(FAILURE);
    return(SUCCESS);
}

static int
tws_init_trace_q(struct tws_softc *sc)
{
    sc->trace_q.head=0;
    sc->trace_q.tail=0;
    sc->trace_q.depth=256;
    sc->trace_q.overflow=0;
    sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
                              M_TWS, M_WAITOK | M_ZERO);
    if ( ! sc->trace_q.q )
        return(FAILURE);
    return(SUCCESS);
}

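/*
 * Carve the contiguous command packet area into per-request command packets
 * and sense buffers, and place the non-reserved requests on the free queue.
 */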
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{

    struct tws_command_packet *cmd_buf;
    int i;

    cmd_buf = (struct tws_command_packet *)sc->dma_mem;
    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log an ENOMEM failure msg here */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        }
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
                              (i * sizeof(struct tws_command_packet));

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, 1);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}

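/*
 * busdma load callback for the command packet area: the area is mapped as a
 * single segment, so just record its bus address in *arg.
 */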
static void
tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
                           int nseg, int error)
{

    /* printf("command load done \n"); */

    *((bus_addr_t *)arg) = segs[0].ds_addr;
}

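/*
 * Small controller state machine: events move the driver between the INIT,
 * ONLINE, OFFLINE, RESET and UNINIT states; invalid transitions are only
 * logged.  Must be called with gen_lock held.
 */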
void
tws_send_event(struct tws_softc *sc, u_int8_t event)
{
    mtx_assert(&sc->gen_lock, MA_OWNED);
    TWS_TRACE_DEBUG(sc, "received event ", 0, event);
    switch (event) {

        case TWS_INIT_START:
            sc->tws_state = TWS_INIT;
            break;

        case TWS_INIT_COMPLETE:
            if (sc->tws_state != TWS_INIT) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_ONLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_ONLINE;
            }
            break;

        case TWS_RESET_START:
            /* We can transition to reset state from any state except reset */
            if (sc->tws_state != TWS_RESET) {
                sc->tws_prev_state = sc->tws_state;
                sc->tws_state = TWS_RESET;
            }
            break;

        case TWS_RESET_COMPLETE:
            if (sc->tws_state != TWS_RESET) {
                device_printf(sc->tws_dev, "invalid state transition %d => %d (previous state)\n", sc->tws_state, sc->tws_prev_state);
            } else {
                sc->tws_state = sc->tws_prev_state;
            }
            break;

        case TWS_SCAN_FAILURE:
            if (sc->tws_state != TWS_ONLINE) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_OFFLINE\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_OFFLINE;
            }
            break;

        case TWS_UNINIT_START:
            if ((sc->tws_state != TWS_ONLINE) && (sc->tws_state != TWS_OFFLINE)) {
                device_printf(sc->tws_dev, "invalid state transition %d => TWS_UNINIT\n", sc->tws_state);
            } else {
                sc->tws_state = TWS_UNINIT;
            }
            break;
    }

}

uint8_t
tws_get_state(struct tws_softc *sc)
{

    return((u_int8_t)sc->tws_state);

}

/* Called during system shutdown after sync. */

static int
tws_shutdown(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    tws_turn_off_interrupts(sc);
    tws_init_connect(sc, 1);

    return (0);
}

/*
 * Device suspend routine.
 */
static int
tws_suspend(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

/*
 * Device resume routine.
 */
static int
tws_resume(device_t dev)
{

    struct tws_softc *sc = device_get_softc(dev);

    if ( sc )
        TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    return (0);
}

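/*
 * Hand out a request: SCSI I/O requests come off the free queue under
 * q_lock, while internal request types index directly into the reserved
 * slots under gen_lock.
 */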
struct tws_request *
tws_get_request(struct tws_softc *sc, u_int16_t type)
{
    struct mtx *my_mutex = ((type == TWS_REQ_TYPE_SCSI_IO) ? &sc->q_lock : &sc->gen_lock);
    struct tws_request *r = NULL;

    mtx_lock(my_mutex);

    if (type == TWS_REQ_TYPE_SCSI_IO) {
        r = tws_q_remove_head(sc, TWS_FREE_Q);
    } else {
        if ( sc->reqs[type].state == TWS_REQ_STATE_FREE ) {
            r = &sc->reqs[type];
        }
    }

    if ( r ) {
        bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
        r->data = NULL;
        r->length = 0;
        r->type = type;
        r->flags = TWS_DIR_UNKNOWN;
        r->error_code = TWS_REQ_RET_INVALID;
        r->cb = NULL;
        r->ccb_ptr = NULL;
        callout_stop(&r->timeout);
        r->next = r->prev = NULL;

        r->state = ((type == TWS_REQ_TYPE_SCSI_IO) ? TWS_REQ_STATE_TRAN : TWS_REQ_STATE_BUSY);
    }

    mtx_unlock(my_mutex);

    return(r);
}

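/* Return a request to the free queue. */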
void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

static device_method_t tws_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     tws_probe),
    DEVMETHOD(device_attach,    tws_attach),
    DEVMETHOD(device_detach,    tws_detach),
    DEVMETHOD(device_shutdown,  tws_shutdown),
    DEVMETHOD(device_suspend,   tws_suspend),
    DEVMETHOD(device_resume,    tws_resume),

    DEVMETHOD_END
};

static driver_t tws_driver = {
        "tws",
        tws_methods,
        sizeof(struct tws_softc)
};


static devclass_t tws_devclass;

/* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
MODULE_DEPEND(tws, cam, 1, 1, 1);
MODULE_DEPEND(tws, pci, 1, 1, 1);

TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);
923