tws_cam.c revision 246713
1/*
2 * Copyright (c) 2010 LSI Corp.
3 * All rights reserved.
4 * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: head/sys/dev/tws/tws_cam.c 246713 2013-02-12 16:57:20Z kib $
28 */
29
30#include <dev/tws/tws.h>
31#include <dev/tws/tws_services.h>
32#include <dev/tws/tws_hdm.h>
33#include <dev/tws/tws_user.h>
34#include <cam/cam.h>
35#include <cam/cam_ccb.h>
36#include <cam/cam_sim.h>
37#include <cam/cam_xpt_sim.h>
38#include <cam/cam_debug.h>
39#include <cam/cam_periph.h>
40
41#include <cam/scsi/scsi_all.h>
42#include <cam/scsi/scsi_message.h>
43
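/*
 * tws_cam_depth is the number of simultaneous requests advertised to CAM.
 * It defaults to TWS_MAX_REQS - TWS_RESERVED_REQS, can be overridden with
 * the hw.tws.cam_depth loader tunable (declared at the end of this file),
 * and is clamped to a sane range in tws_cam_attach().
 */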
44static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
45static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
46
47static void  tws_action(struct cam_sim *sim, union ccb *ccb);
48static void  tws_poll(struct cam_sim *sim);
49static void tws_scsi_complete(struct tws_request *req);
50
51
52
53void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
54int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
55int tws_bus_scan(struct tws_softc *sc);
56int tws_cam_attach(struct tws_softc *sc);
57void tws_cam_detach(struct tws_softc *sc);
58void tws_reset(void *arg);
59
60static void tws_reset_cb(void *arg);
61static void tws_reinit(void *arg);
62static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
63static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
64static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
65                            int nseg, int error);
66static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
67                            void *sgl_dest, u_int16_t num_sgl_entries);
68static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
69static void tws_scsi_err_complete(struct tws_request *req,
70                                               struct tws_command_header *hdr);
71static void tws_passthru_err_complete(struct tws_request *req,
72                                               struct tws_command_header *hdr);
73
74
75void tws_timeout(void *arg);
76static void tws_intr_attn_aen(struct tws_softc *sc);
77static void tws_intr_attn_error(struct tws_softc *sc);
78static void tws_intr_resp(struct tws_softc *sc);
79void tws_intr(void *arg);
80void tws_cmd_complete(struct tws_request *req);
81void tws_aen_complete(struct tws_request *req);
82int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
83void tws_getset_param_complete(struct tws_request *req);
84int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
85              u_int32_t param_size, void *data);
86int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
87              u_int32_t param_size, void *data);
88
89
90extern struct tws_request *tws_get_request(struct tws_softc *sc,
91                                            u_int16_t type);
92extern void *tws_release_request(struct tws_request *req);
93extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
94extern boolean tws_get_response(struct tws_softc *sc,
95                                           u_int16_t *req_id, u_int64_t *mfa);
96extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
97                                u_int8_t q_type );
98extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
99                                   struct tws_request *req, u_int8_t q_type );
100extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
101
102extern struct tws_sense *
103tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
104
105extern void tws_fetch_aen(void *arg);
106extern void tws_disable_db_intr(struct tws_softc *sc);
107extern void tws_enable_db_intr(struct tws_softc *sc);
108extern void tws_passthru_complete(struct tws_request *req);
109extern void tws_aen_synctime_with_host(struct tws_softc *sc);
110extern void tws_circular_aenq_insert(struct tws_softc *sc,
111                    struct tws_circular_q *cq, struct tws_event_packet *aen);
112extern int tws_use_32bit_sgls;
113extern boolean tws_ctlr_reset(struct tws_softc *sc);
114extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
115                                                           u_int8_t q_type );
116extern void tws_turn_off_interrupts(struct tws_softc *sc);
117extern void tws_turn_on_interrupts(struct tws_softc *sc);
118extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
119extern void tws_init_obfl_q(struct tws_softc *sc);
120extern uint8_t tws_get_state(struct tws_softc *sc);
121extern void tws_assert_soft_reset(struct tws_softc *sc);
122extern boolean tws_ctlr_ready(struct tws_softc *sc);
123extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
124extern int tws_setup_intr(struct tws_softc *sc, int irqs);
125extern int tws_teardown_intr(struct tws_softc *sc);
126
127
128
129int
130tws_cam_attach(struct tws_softc *sc)
131{
132    struct cam_devq *devq;
133
134    TWS_TRACE_DEBUG(sc, "entry", 0, sc);
135    /* Create a device queue for sim */
136
137    /*
138     * If the user sets the CAM depth to less than 1,
139     * CAM may get confused.
140     */
141    if ( tws_cam_depth < 1 )
142        tws_cam_depth = 1;
143    if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS)  )
144        tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
145
146    TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
147
148    if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
149        tws_log(sc, CAM_SIMQ_ALLOC);
150        return(ENOMEM);
151    }
152
153   /*
154    * Create a SIM entry.  Though the controller can support tws_queue_depth
155    * simultaneous requests, we claim to be able to handle only tws_cam_depth
156    * (tws_queue_depth - TWS_RESERVED_REQS) of them, so that reserved request
157    * packets are always available to service ioctls and internal commands.
158    */
159    sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
160                      device_get_unit(sc->tws_dev),
161#if (__FreeBSD_version >= 700000)
162                      &sc->sim_lock,
163#endif
164                      tws_cam_depth, 1, devq);
165                      /* 1, 1, devq); */
166    if (sc->sim == NULL) {
167        cam_simq_free(devq);
168        tws_log(sc, CAM_SIM_ALLOC);
        return(ENOMEM); /* without a SIM there is nothing to register below */
169    }
170    /* Register the bus. */
171    mtx_lock(&sc->sim_lock);
172    if (xpt_bus_register(sc->sim,
173#if (__FreeBSD_version >= 700000)
174                         sc->tws_dev,
175#endif
176                         0) != CAM_SUCCESS) {
177        cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
178        sc->sim = NULL; /* so cam_detach will not try to free it */
179        mtx_unlock(&sc->sim_lock);
180        tws_log(sc, TWS_XPT_BUS_REGISTER);
181        return(ENXIO);
182    }
183    if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
184                         CAM_TARGET_WILDCARD,
185                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
186        xpt_bus_deregister(cam_sim_path(sc->sim));
187        /* Passing TRUE to cam_sim_free will free the devq as well. */
188        cam_sim_free(sc->sim, TRUE);
189        tws_log(sc, TWS_XPT_CREATE_PATH);
190        mtx_unlock(&sc->sim_lock);
191        return(ENXIO);
192    }
193    mtx_unlock(&sc->sim_lock);
194
195    return(0);
196}
197
198void
199tws_cam_detach(struct tws_softc *sc)
200{
201    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
202    mtx_lock(&sc->sim_lock);
203    if (sc->path)
204        xpt_free_path(sc->path);
205    if (sc->sim) {
206        xpt_bus_deregister(cam_sim_path(sc->sim));
207        cam_sim_free(sc->sim, TRUE);
208    }
209    mtx_unlock(&sc->sim_lock);
210}
211
212int
213tws_bus_scan(struct tws_softc *sc)
214{
215    union ccb       *ccb;
216
217    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
218    if (!(sc->sim))
219        return(ENXIO);
220    mtx_assert(&sc->sim_lock, MA_OWNED);
221    if ((ccb = xpt_alloc_ccb()) == NULL)
222        return(ENOMEM);
223
224    if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sc->sim),
225                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
226        xpt_free_ccb(ccb);
227        return(EIO);
228    }
229    xpt_rescan(ccb);
230
231    return(0);
232}
233
234static void
235tws_action(struct cam_sim *sim, union ccb *ccb)
236{
237    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
238
239
240    switch( ccb->ccb_h.func_code ) {
241        case XPT_SCSI_IO:
242        {
243            if ( tws_execute_scsi(sc, ccb) )
244                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
245            break;
246        }
247        case XPT_ABORT:
248        {
249            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
250            ccb->ccb_h.status = CAM_UA_ABORT;
251            xpt_done(ccb);
252            break;
253        }
254        case XPT_RESET_BUS:
255        {
256            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            /* Complete the CCB; CAM expects every request to be finished. */
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
257            break;
258        }
259        case XPT_SET_TRAN_SETTINGS:
260        {
261            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
262            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
263            xpt_done(ccb);
264
265            break;
266        }
267        case XPT_GET_TRAN_SETTINGS:
268        {
269            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);
270
271#if (__FreeBSD_version >= 700000 )
272            ccb->cts.protocol = PROTO_SCSI;
273            ccb->cts.protocol_version = SCSI_REV_2;
274            ccb->cts.transport = XPORT_SPI;
275            ccb->cts.transport_version = 2;
276
277            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
278            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
279            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
280            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
281#else
282            ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
283            ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
284#endif
285            ccb->ccb_h.status = CAM_REQ_CMP;
286            xpt_done(ccb);
287
288            break;
289        }
290        case XPT_CALC_GEOMETRY:
291        {
292            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
293                                          ccb->ccg.block_size);
294            cam_calc_geometry(&ccb->ccg, 1/* extended */);
295            xpt_done(ccb);
296
297            break;
298        }
299        case XPT_PATH_INQ:
300        {
301            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
302            ccb->cpi.version_num = 1;
303            ccb->cpi.hba_inquiry = 0;
304            ccb->cpi.target_sprt = 0;
305            ccb->cpi.hba_misc = 0;
306            ccb->cpi.hba_eng_cnt = 0;
307            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
308            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
309            ccb->cpi.unit_number = cam_sim_unit(sim);
310            ccb->cpi.bus_id = cam_sim_bus(sim);
311            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
312            ccb->cpi.base_transfer_speed = 6000000;
313            strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
314            strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
315            strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
316#if (__FreeBSD_version >= 700000 )
317            ccb->cpi.transport = XPORT_SPI;
318            ccb->cpi.transport_version = 2;
319            ccb->cpi.protocol = PROTO_SCSI;
320            ccb->cpi.protocol_version = SCSI_REV_2;
321            ccb->cpi.maxio = TWS_MAX_IO_SIZE;
322#endif
323            ccb->ccb_h.status = CAM_REQ_CMP;
324            xpt_done(ccb);
325
326            break;
327        }
328        default:
329            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
330            ccb->ccb_h.status = CAM_REQ_INVALID;
331            xpt_done(ccb);
332            break;
333    }
334}
335
336static void
337tws_scsi_complete(struct tws_request *req)
338{
339    struct tws_softc *sc = req->sc;
340
341    mtx_lock(&sc->q_lock);
342    tws_q_remove_request(sc, req, TWS_BUSY_Q);
343    mtx_unlock(&sc->q_lock);
344
345    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
346    tws_unmap_request(req->sc, req);
347
348
349    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
350    mtx_lock(&sc->sim_lock);
351    xpt_done(req->ccb_ptr);
352    mtx_unlock(&sc->sim_lock);
353
354    mtx_lock(&sc->q_lock);
355    tws_q_insert_tail(sc, req, TWS_FREE_Q);
356    mtx_unlock(&sc->q_lock);
357}
358
359void
360tws_getset_param_complete(struct tws_request *req)
361{
362    struct tws_softc *sc = req->sc;
363
364    TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
365
366    untimeout(tws_timeout, req, req->thandle);
367    tws_unmap_request(sc, req);
368
369    free(req->data, M_TWS);
370
371    req->state = TWS_REQ_STATE_FREE;
372}
373
374void
375tws_aen_complete(struct tws_request *req)
376{
377    struct tws_softc *sc = req->sc;
378    struct tws_command_header *sense;
379    struct tws_event_packet event;
380    u_int16_t aen_code=0;
381
382    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);
383
384    untimeout(tws_timeout, req, req->thandle);
385    tws_unmap_request(sc, req);
386
387    sense = (struct tws_command_header *)req->data;
388
389    TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
390                                   sense->sense_data[2]);
391    TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
392                                   sense->status_block.res__severity);
393    TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
394                                   sense->status_block.error);
395    TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
396                                   sense->header_desc.size_sense);
397
398    aen_code = sense->status_block.error;
399
400    switch ( aen_code ) {
401        case TWS_AEN_SYNC_TIME_WITH_HOST :
402            tws_aen_synctime_with_host(sc);
403            break;
404        case TWS_AEN_QUEUE_EMPTY :
405            break;
406        default :
407            bzero(&event, sizeof(struct tws_event_packet));
408            event.sequence_id = sc->seq_id;
409            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
410            event.aen_code = sense->status_block.error;
411            event.severity = sense->status_block.res__severity & 0x7;
412            event.event_src = TWS_SRC_CTRL_EVENT;
413            strcpy(event.severity_str, tws_sev_str[event.severity]);
414            event.retrieved = TWS_AEN_NOT_RETRIEVED;
415
416            bcopy(sense->err_specific_desc, event.parameter_data,
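            /*
             * The firmware's error-specific descriptor appears to carry two
             * consecutive NUL-terminated strings; parameter_len is extended
             * below to cover both, and the console message prints the second
             * string before the first.
             */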
417                                    TWS_ERROR_SPECIFIC_DESC_LEN);
418            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
419            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;
420
421            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
422                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
423                                                event.parameter_len) + 1);
424            }
425
426            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
427                event.severity_str,
428                event.event_src,
429                event.aen_code,
430                event.parameter_data +
431                     (strlen(event.parameter_data) + 1),
432                event.parameter_data);
433
434            mtx_lock(&sc->gen_lock);
435            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
436            sc->seq_id++;
437            mtx_unlock(&sc->gen_lock);
438            break;
439
440    }
441
442    free(req->data, M_TWS);
443
444    req->state = TWS_REQ_STATE_FREE;
445
446    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
447        /* timeout(tws_fetch_aen, sc, 1);*/
448        sc->stats.num_aens++;
449        tws_fetch_aen((void *)sc);
450    }
451}
452
453void
454tws_cmd_complete(struct tws_request *req)
455{
456    struct tws_softc *sc = req->sc;
457
458    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
459    tws_unmap_request(sc, req);
460}
461
462static void
463tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
464{
465    struct tws_command_header *hdr;
466    struct tws_sense *sen;
467    struct tws_request *req;
468    u_int16_t req_id;
469    u_int32_t reg, status;
470
471    if ( !mfa ) {
472        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
473        return;
474    } else {
475        /* lookup the sense */
476        sen = tws_find_sense_from_mfa(sc, mfa);
477        if ( sen == NULL ) {
478            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
479            return;
480        }
481        hdr = sen->hdr;
482        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
483        req_id = hdr->header_desc.request_id;
484        req = &sc->reqs[req_id];
485        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
486        if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
487            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
488    }
489
490    switch (req->type) {
491        case TWS_REQ_TYPE_PASSTHRU :
492            tws_passthru_err_complete(req, hdr);
493            break;
494        case TWS_REQ_TYPE_GETSET_PARAM :
495            tws_getset_param_complete(req);
496            break;
497        case TWS_REQ_TYPE_SCSI_IO :
498            tws_scsi_err_complete(req, hdr);
499            break;
500
501    }
502
503    mtx_lock(&sc->io_lock);
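    /*
     * Hand the message frame back to the firmware: restore the full
     * 128-byte header size and post the frame address, high word then
     * low word, to the outbound queue pointer registers.  Bit 13 of the
     * I2O status register is then treated as an outbound free list
     * (OBFL) overrun indication; that reading of the bit is inferred
     * from its use here.
     */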
504    hdr->header_desc.size_header = 128;
505    reg = (u_int32_t)( mfa>>32);
506    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
507    reg = (u_int32_t)(mfa);
508    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);
509
510    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
511    if ( status & TWS_BIT13 ) {
512        device_printf(sc->tws_dev,  "OBFL Overrun\n");
513        sc->obfl_q_overrun = true;
514    }
515    mtx_unlock(&sc->io_lock);
516}
517
518static void
519tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
520{
521    u_int8_t *sense_data;
522    struct tws_softc *sc = req->sc;
523    union ccb *ccb = req->ccb_ptr;
524
525    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
526                                 req->cmd_pkt->cmd.pkt_a.status);
527    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
528         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
529
530        if ( ccb->ccb_h.target_lun ) {
531            TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
532            ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
533        } else {
534            TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
535            ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
536        }
537
538    } else {
539        TWS_TRACE_DEBUG(sc, "scsi status  error",0,0);
540        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
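        /* Opcode 0x1A is MODE SENSE(6); flag autosense so the unsupported
         * page is reported back to CAM via sense data. */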
541        if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
542              (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
543            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
544            TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
545        }
546    }
547
548    /* If no error status was set above, mark the request as completed with an error. */
549    if (ccb->ccb_h.status == 0)
550        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
551
552    sense_data = (u_int8_t *)&ccb->csio.sense_data;
553    if (sense_data) {
554        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
555        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
556        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
557    }
558    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
559
560    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
561    mtx_lock(&sc->sim_lock);
562    xpt_done(ccb);
563    mtx_unlock(&sc->sim_lock);
564
565    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
566    tws_unmap_request(req->sc, req);
567    mtx_lock(&sc->q_lock);
568    tws_q_remove_request(sc, req, TWS_BUSY_Q);
569    tws_q_insert_tail(sc, req, TWS_FREE_Q);
570    mtx_unlock(&sc->q_lock);
571}
572
573static void
574tws_passthru_err_complete(struct tws_request *req,
575                                          struct tws_command_header *hdr)
576{
577    TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
578    req->error_code = hdr->status_block.error;
579    memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
580    tws_passthru_complete(req);
581}
582
583static void
584tws_drain_busy_queue(struct tws_softc *sc)
585{
586    struct tws_request *req;
587    union ccb          *ccb;
588    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
589
590    mtx_lock(&sc->q_lock);
591    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
592    mtx_unlock(&sc->q_lock);
593    while ( req ) {
594        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
595        untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
596
597        req->error_code = TWS_REQ_RET_RESET;
598        ccb = (union ccb *)(req->ccb_ptr);
599
600        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
601        ccb->ccb_h.status |=  CAM_REQUEUE_REQ;
602        ccb->ccb_h.status |=  CAM_SCSI_BUS_RESET;
603
604        tws_unmap_request(req->sc, req);
605
606        mtx_lock(&sc->sim_lock);
607        xpt_done(req->ccb_ptr);
608        mtx_unlock(&sc->sim_lock);
609
610        mtx_lock(&sc->q_lock);
611        tws_q_insert_tail(sc, req, TWS_FREE_Q);
612        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
613        mtx_unlock(&sc->q_lock);
614    }
615}
616
617
618static void
619tws_drain_reserved_reqs(struct tws_softc *sc)
620{
621    struct tws_request *r;
622
623    r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
624    if ( r->state != TWS_REQ_STATE_FREE ) {
625        TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
626        untimeout(tws_timeout, r, r->thandle);
627        tws_unmap_request(sc, r);
628        free(r->data, M_TWS);
629        r->state = TWS_REQ_STATE_FREE;
630        r->error_code = TWS_REQ_RET_RESET;
631    }
632
633    r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
634    if ( r->state == TWS_REQ_STATE_BUSY ) {
635        TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
636        r->error_code = TWS_REQ_RET_RESET;
637    }
638
639    r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
640    if ( r->state != TWS_REQ_STATE_FREE ) {
641        TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
642        untimeout(tws_timeout, r, r->thandle);
643        tws_unmap_request(sc, r);
644        free(r->data, M_TWS);
645        r->state = TWS_REQ_STATE_FREE;
646        r->error_code = TWS_REQ_RET_RESET;
647    }
648}
649
650static void
651tws_drain_response_queue(struct tws_softc *sc)
652{
653    u_int16_t req_id;
654    u_int64_t mfa;
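    /* Discard any responses still sitting in the outbound queue; the
     * requests they belonged to have already been recycled by the busy
     * queue and reserved request drains that tws_reset_cb() runs first. */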
655    while ( tws_get_response(sc, &req_id, &mfa) );
656}
657
658
659static int32_t
660tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
661{
662    struct tws_command_packet *cmd_pkt;
663    struct tws_request *req;
664    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
665    struct ccb_scsiio *csio = &(ccb->csio);
666    int error;
667    u_int16_t lun;
668
669    mtx_assert(&sc->sim_lock, MA_OWNED);
670    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
671        TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
672        ccb_h->status |= CAM_TID_INVALID;
673        xpt_done(ccb);
674        return(0);
675    }
676    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
677        TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
678        ccb_h->status |= CAM_LUN_INVALID;
679        xpt_done(ccb);
680        return(0);
681    }
682
683    if(ccb_h->flags & CAM_CDB_PHYS) {
684        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
685        ccb_h->status = CAM_REQ_INVALID;
686        xpt_done(ccb);
687        return(0);
688    }
689
690    /*
691     * We are going to work on this request.  Mark it as enqueued (though
692     * we don't actually queue it...)
693     */
694    ccb_h->status |= CAM_SIM_QUEUED;
695
696    req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
697    if ( !req ) {
698        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
699        ccb_h->status |= CAM_REQUEUE_REQ;
700        xpt_done(ccb);
701        return(0);
702    }
703
704    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
705        if(ccb_h->flags & CAM_DIR_IN)
706            req->flags |= TWS_DIR_IN;
707        if(ccb_h->flags & CAM_DIR_OUT)
708            req->flags |= TWS_DIR_OUT;
709    } else {
710        req->flags = TWS_DIR_NONE; /* no data */
711    }
712
713    req->type = TWS_REQ_TYPE_SCSI_IO;
714    req->cb = tws_scsi_complete;
715
716    cmd_pkt = req->cmd_pkt;
717    /* cmd_pkt->hdr.header_desc.size_header = 128; */
718    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
719    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
720    cmd_pkt->cmd.pkt_a.status = 0;
721    cmd_pkt->cmd.pkt_a.sgl_offset = 16;
722
723    /* lower nibble */
724    lun = ccb_h->target_lun & 0XF;
725    lun = lun << 12;
726    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
727    /* upper nibble */
728    lun = ccb_h->target_lun & 0XF0;
729    lun = lun << 8;
730    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
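    /*
     * The 8-bit LUN is split across the two 16-bit fields above: its low
     * nibble lands in bits 12-15 of lun_l4__req_id next to the request id,
     * and its high nibble in bits 12-15 of lun_h4__sgl_entries next to the
     * SG entry count (which tws_dmamap_data_load_cbfn ORs in later).  For
     * example, lun 0x35 contributes 0x5000 to the first field and 0x3000
     * to the second.
     */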
731
732#ifdef TWS_DEBUG
733    if ( csio->cdb_len > 16 )
734         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
735#endif
736
737    if(ccb_h->flags & CAM_CDB_POINTER)
738        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
739    else
740        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
741
742    req->data = ccb;
743    req->flags |= TWS_DATA_CCB;
744    /* save ccb ptr */
745    req->ccb_ptr = ccb;
746    /*
747     * tws_map_load_data_callback will fill in the SGL,
748     * and submit the I/O.
749     */
750    sc->stats.scsi_ios++;
751    ccb_h->timeout_ch = timeout(tws_timeout, req, (ccb_h->timeout * hz)/1000);
752    error = tws_map_request(sc, req);
753    return(error);
754}
755
756
757int
758tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
759{
760    struct tws_request *req;
761    struct tws_command_packet *cmd_pkt;
762    int error;
763
764    TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
765    req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);
766
767    if ( req == NULL )
768        return(ENOMEM);
769
770    req->cb = tws_aen_complete;
771
772    cmd_pkt = req->cmd_pkt;
773    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
774    cmd_pkt->cmd.pkt_a.status = 0;
775    cmd_pkt->cmd.pkt_a.unit = 0;
776    cmd_pkt->cmd.pkt_a.sgl_offset = 16;
777    cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
778
779    cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
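    /*
     * The opcode comes from the caller; in this driver that is the AEN
     * fetch path, which presumably issues REQUEST SENSE (0x03), in which
     * case CDB byte 4 is the allocation length (128 bytes) returned into
     * the TWS_SECTOR_SIZE buffer allocated below.
     */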
780    cmd_pkt->cmd.pkt_a.cdb[4] = 128;
781
782    req->length = TWS_SECTOR_SIZE;
783    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
784    if ( req->data == NULL )
785        return(ENOMEM);
786    bzero(req->data, TWS_SECTOR_SIZE);
787    req->flags = TWS_DIR_IN;
788
789    req->thandle = timeout(tws_timeout, req, (TWS_IO_TIMEOUT * hz));
790    error = tws_map_request(sc, req);
791    return(error);
792
793}
794
795int
796tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
797              u_int32_t param_size, void *data)
798{
799    struct tws_request *req;
800    struct tws_command_packet *cmd_pkt;
801    union tws_command_giga *cmd;
802    struct tws_getset_param *param;
803    int error;
804
805    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
806    if ( req == NULL ) {
807        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
808        return(ENOMEM);
809    }
810
811    req->length = TWS_SECTOR_SIZE;
812    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
813    if ( req->data == NULL )
814        return(ENOMEM);
815    bzero(req->data, TWS_SECTOR_SIZE);
816    param = (struct tws_getset_param *)req->data;
817
818    req->cb = tws_getset_param_complete;
819    req->flags = TWS_DIR_OUT;
820    cmd_pkt = req->cmd_pkt;
821
822    cmd = &cmd_pkt->cmd.pkt_g;
823    cmd->param.sgl_off__opcode =
824            BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
825    cmd->param.request_id = (u_int8_t)req->request_id;
826    cmd->param.host_id__unit = 0;
827    cmd->param.param_count = 1;
828    cmd->param.size = 2; /* map routine will add sgls */
829
830    /* Specify which parameter we want to set. */
831    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
832    param->parameter_id = (u_int8_t)(param_id);
833    param->parameter_size_bytes = (u_int16_t)param_size;
834    memcpy(param->data, data, param_size);
835
836    req->thandle = timeout(tws_timeout, req, (TWS_IOCTL_TIMEOUT * hz));
837    error = tws_map_request(sc, req);
838    return(error);
839
840}
841
842int
843tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
844              u_int32_t param_size, void *data)
845{
846    struct tws_request *req;
847    struct tws_command_packet *cmd_pkt;
848    union tws_command_giga *cmd;
849    struct tws_getset_param *param;
850    u_int16_t reqid;
851    u_int64_t mfa;
852    int error = SUCCESS;
853
854
855    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
856    if ( req == NULL ) {
857        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
858        return(FAILURE);
859    }
860
861    req->length = TWS_SECTOR_SIZE;
862    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
863    if ( req->data == NULL )
864        return(FAILURE);
865    bzero(req->data, TWS_SECTOR_SIZE);
866    param = (struct tws_getset_param *)req->data;
867
868    req->cb = NULL;
869    req->flags = TWS_DIR_IN;
870    cmd_pkt = req->cmd_pkt;
871
872    cmd = &cmd_pkt->cmd.pkt_g;
873    cmd->param.sgl_off__opcode =
874            BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
875    cmd->param.request_id = (u_int8_t)req->request_id;
876    cmd->param.host_id__unit = 0;
877    cmd->param.param_count = 1;
878    cmd->param.size = 2; /* map routine will add sgls */
879
880    /* Specify which parameter we want to retrieve. */
881    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
882    param->parameter_id = (u_int8_t)(param_id);
883    param->parameter_size_bytes = (u_int16_t)param_size;
884
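    /*
     * Unlike tws_set_param(), this path completes synchronously: no
     * completion callback is set, the response is polled for with
     * tws_poll4_response(), and the returned request id is compared
     * against the fixed id of the GETSET_PARAM reserved slot (reserved
     * requests live at sc->reqs[type], so id and type coincide).  This
     * keeps it usable even when interrupts are not being serviced.
     */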
885    error = tws_map_request(sc, req);
886    if (!error) {
887        reqid = tws_poll4_response(sc, &mfa);
888        tws_unmap_request(sc, req);
889
890        if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
891            memcpy(data, param->data, param_size);
892        } else {
893            error = FAILURE;
894        }
895    }
896
897    free(req->data, M_TWS);
898    req->state = TWS_REQ_STATE_FREE;
899    return(error);
900
901}
902
903void
904tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
905{
906    if (req->data != NULL) {
907        if ( req->flags & TWS_DIR_IN )
908            bus_dmamap_sync(sc->data_tag, req->dma_map,
909                                            BUS_DMASYNC_POSTREAD);
910        if ( req->flags & TWS_DIR_OUT )
911            bus_dmamap_sync(sc->data_tag, req->dma_map,
912                                            BUS_DMASYNC_POSTWRITE);
913        mtx_lock(&sc->io_lock);
914        bus_dmamap_unload(sc->data_tag, req->dma_map);
915        mtx_unlock(&sc->io_lock);
916    }
917}
918
919int32_t
920tws_map_request(struct tws_softc *sc, struct tws_request *req)
921{
922    int32_t error = 0;
923
924
925    /* If the command involves data, map that too. */
926    if (req->data != NULL) {
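        /*
         * SCSI I/O may have its DMA load deferred (BUS_DMA_WAITOK can
         * return EINPROGRESS, handled below by freezing the SIM queue);
         * internal and passthru requests use BUS_DMA_NOWAIT so they fail
         * immediately instead of deferring.
         */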
927        int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
928
929        /*
930         * Map the data buffer into bus space and build the SG list.
931         */
932        mtx_lock(&sc->io_lock);
933	if (req->flags & TWS_DATA_CCB)
934		error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
935					    req->data,
936					    tws_dmamap_data_load_cbfn, req,
937					    my_flags);
938	else
939		error = bus_dmamap_load(sc->data_tag, req->dma_map,
940					req->data, req->length,
941					tws_dmamap_data_load_cbfn, req,
942					my_flags);
943        mtx_unlock(&sc->io_lock);
944
945        if (error == EINPROGRESS) {
946            TWS_TRACE(sc, "in progress", 0, error);
947            tws_freeze_simq(sc, req);
948            error = 0;  // EINPROGRESS is not a fatal error.
949        }
950    } else { /* no data involved */
951        error = tws_submit_command(sc, req);
952    }
953    return(error);
954}
955
956
957static void
958tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
959                            int nseg, int error)
960{
961    struct tws_request *req = (struct tws_request *)arg;
962    struct tws_softc *sc = req->sc;
963    u_int16_t sgls = nseg;
964    void *sgl_ptr;
965    struct tws_cmd_generic *gcmd;
966
967
968    if ( error ) {
969        TWS_TRACE(sc, "dmamap load failed", error, 0);
970    }
971
972    if ( error == EFBIG ) {
973        TWS_TRACE(sc, "not enough data segs", 0, nseg);
974        req->error_code = error;
975        req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
976        return;
977    }
978
979    if ( req->flags & TWS_DIR_IN )
980        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
981                                            BUS_DMASYNC_PREREAD);
982    if ( req->flags & TWS_DIR_OUT )
983        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
984                                        BUS_DMASYNC_PREWRITE);
985    if ( segs ) {
986        if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
987             GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
988                            TWS_FW_CMD_EXECUTE_SCSI) ||
989              req->type == TWS_REQ_TYPE_GETSET_PARAM) {
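            /*
             * For "giga" (parameter/passthru) commands the SG list is
             * appended at the current end of the command.  The size field
             * appears to be counted in 32-bit words, so each 64-bit SG
             * descriptor adds 4 words (16 bytes) and each 32-bit
             * descriptor adds 2 words (8 bytes).
             */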
990            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
991            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
992            gcmd->size += sgls *
993                          ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
994            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);
995
996        } else {
997            tws_fill_sg_list(req->sc, (void *)segs,
998                      (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
999            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
1000        }
1001    }
1002
1003
1004    req->error_code = tws_submit_command(req->sc, req);
1005
1006}
1007
1008
1009static void
1010tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
1011                          u_int16_t num_sgl_entries)
1012{
1013    int i;
1014
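    /*
     * sgl_src is really the bus_dma_segment_t array handed to the load
     * callback, which is why the 64-bit copies below advance the source
     * by sizeof(bus_dma_segment_t) rather than by the firmware descriptor
     * size.  The destination is the firmware's packed 64-bit or 32-bit SG
     * format, chosen by is64bit and tws_use_32bit_sgls.
     */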
1015    if ( sc->is64bit ) {
1016        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
1017
1018        if ( !tws_use_32bit_sgls ) {
1019            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
1020            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
1021                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
1022            for (i = 0; i < num_sgl_entries; i++) {
1023                sgl_d[i].address = sgl_s->address;
1024                sgl_d[i].length = sgl_s->length;
1025                sgl_d[i].flag = 0;
1026                sgl_d[i].reserved = 0;
1027                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1028                                               sizeof(bus_dma_segment_t));
1029            }
1030        } else {
1031            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1032            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1033                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1034            for (i = 0; i < num_sgl_entries; i++) {
1035                sgl_d[i].address = sgl_s->address;
1036                sgl_d[i].length = sgl_s->length;
1037                sgl_d[i].flag = 0;
1038                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1039                                               sizeof(bus_dma_segment_t));
1040            }
1041        }
1042    } else {
1043        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
1044        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1045
1046        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1047            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1048
1049
1050        for (i = 0; i < num_sgl_entries; i++) {
1051            sgl_d[i].address = sgl_s[i].address;
1052            sgl_d[i].length = sgl_s[i].length;
1053            sgl_d[i].flag = 0;
1054        }
1055    }
1056}
1057
1058
1059void
1060tws_intr(void *arg)
1061{
1062    struct tws_softc *sc = (struct tws_softc *)arg;
1063    u_int32_t histat=0, db=0;
1064
1065    if (!(sc)) {
1066        printf("tws_intr: null softc!!!\n"); /* sc is NULL; device_printf would dereference it */
1067        return;
1068    }
1069
1070    if ( tws_get_state(sc) == TWS_RESET ) {
1071        return;
1072    }
1073
1074    if ( tws_get_state(sc) != TWS_ONLINE ) {
1075        return;
1076    }
1077
1078    sc->stats.num_intrs++;
1079    histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
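    /*
     * As used here, bit 2 of the host interrupt status register signals a
     * doorbell interrupt, which is decoded further through the doorbell
     * register (bit 21 = controller error, bit 18 = AEN pending), and
     * bit 3 signals completed responses on the outbound queue.
     */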
1080    if ( histat & TWS_BIT2 ) {
1081        TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1082        db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1083        if ( db & TWS_BIT21 ) {
1084            tws_intr_attn_error(sc);
1085            return;
1086        }
1087        if ( db & TWS_BIT18 ) {
1088            tws_intr_attn_aen(sc);
1089        }
1090    }
1091
1092    if ( histat & TWS_BIT3 ) {
1093        tws_intr_resp(sc);
1094    }
1095}
1096
1097static void
1098tws_intr_attn_aen(struct tws_softc *sc)
1099{
1100    u_int32_t db=0;
1101
1102    /* mask off doorbell interrupts until all the AENs are fetched */
1103    /* tws_disable_db_intr(sc); */
1104    tws_fetch_aen((void *)sc);
1105    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
1106    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1107
1108}
1109
1110static void
1111tws_intr_attn_error(struct tws_softc *sc)
1112{
1113    u_int32_t db=0;
1114
1115    TWS_TRACE(sc, "attn error", 0, 0);
1116    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
1117    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1118    device_printf(sc->tws_dev, "Micro controller error.\n");
1119    tws_reset(sc);
1120}
1121
1122static void
1123tws_intr_resp(struct tws_softc *sc)
1124{
1125    u_int16_t req_id;
1126    u_int64_t mfa;
1127
1128    while ( tws_get_response(sc, &req_id, &mfa) ) {
1129        sc->stats.reqs_out++;
1130        if ( req_id == TWS_INVALID_REQID ) {
1131            TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1132            sc->stats.reqs_errored++;
1133            tws_err_complete(sc, mfa);
1134            continue;
1135        }
1136        sc->reqs[req_id].cb(&sc->reqs[req_id]);
1137    }
1138
1139}
1140
1141
1142static void
1143tws_poll(struct cam_sim *sim)
1144{
1145    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
1146    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
1147    tws_intr((void *) sc);
1148}
1149
1150void
1151tws_timeout(void *arg)
1152{
1153    struct tws_request *req = (struct tws_request *)arg;
1154    struct tws_softc *sc = req->sc;
1155
1156
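    /*
     * Requests flushed by a controller reset have error_code set to
     * TWS_REQ_RET_RESET (see tws_drain_busy_queue() and
     * tws_drain_reserved_reqs()), so a timeout that fires late for one of
     * them is simply ignored.  The check is repeated under gen_lock below
     * to close the race with a concurrent reset.
     */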
1157    if ( req->error_code == TWS_REQ_RET_RESET ) {
1158        return;
1159    }
1160
1161    mtx_lock(&sc->gen_lock);
1162    if ( req->error_code == TWS_REQ_RET_RESET ) {
1163        mtx_unlock(&sc->gen_lock);
1164        return;
1165    }
1166
1167    if ( tws_get_state(sc) == TWS_RESET ) {
1168        mtx_unlock(&sc->gen_lock);
1169        return;
1170    }
1171
1172    tws_teardown_intr(sc);
1173    xpt_freeze_simq(sc->sim, 1);
1174
1175    tws_send_event(sc, TWS_RESET_START);
1176
1177    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1178        device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
1179    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
1180        device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
1181    } else {
1182        device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
1183    }
1184
1185    tws_assert_soft_reset(sc);
1186    tws_turn_off_interrupts(sc);
1187    tws_reset_cb( (void*) sc );
1188    tws_reinit( (void*) sc );
1189
1190//  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
1191    tws_send_event(sc, TWS_RESET_COMPLETE);
1192    mtx_unlock(&sc->gen_lock);
1193
1194    xpt_release_simq(sc->sim, 1);
1195    tws_setup_intr(sc, sc->irqs);
1196}
1197
1198void
1199tws_reset(void *arg)
1200{
1201    struct tws_softc *sc = (struct tws_softc *)arg;
1202
1203    mtx_lock(&sc->gen_lock);
1204    if ( tws_get_state(sc) == TWS_RESET ) {
1205        mtx_unlock(&sc->gen_lock);
1206        return;
1207    }
1208
1209    tws_teardown_intr(sc);
1210    xpt_freeze_simq(sc->sim, 1);
1211
1212    tws_send_event(sc, TWS_RESET_START);
1213
1214    device_printf(sc->tws_dev,  "Resetting controller\n");
1215
1216    tws_assert_soft_reset(sc);
1217    tws_turn_off_interrupts(sc);
1218    tws_reset_cb( (void*) sc );
1219    tws_reinit( (void*) sc );
1220
1221//  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
1222    tws_send_event(sc, TWS_RESET_COMPLETE);
1223    mtx_unlock(&sc->gen_lock);
1224
1225    xpt_release_simq(sc->sim, 1);
1226    tws_setup_intr(sc, sc->irqs);
1227}
1228
1229static void
1230tws_reset_cb(void *arg)
1231{
1232    struct tws_softc *sc = (struct tws_softc *)arg;
1233    time_t endt;
1234    int found = 0;
1235    u_int32_t reg;
1236
1237    if ( tws_get_state(sc) != TWS_RESET ) {
1238        return;
1239    }
1240
1241//  device_printf(sc->tws_dev,  "Draining Busy Queue\n");
1242    tws_drain_busy_queue(sc);
1243//  device_printf(sc->tws_dev,  "Draining Reserved Reqs\n");
1244    tws_drain_reserved_reqs(sc);
1245//  device_printf(sc->tws_dev,  "Draining Response Queue\n");
1246    tws_drain_response_queue(sc);
1247
1248//  device_printf(sc->tws_dev,  "Looking for controller ready flag...\n");
1249    endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
1250    while ((TWS_LOCAL_TIME <= endt) && (!found)) {
1251        reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
1252        if ( reg & TWS_BIT13 ) {
1253            found = 1;
1254//          device_printf(sc->tws_dev,  " ... Got it!\n");
1255        }
1256    }
1257    if ( !found )
1258            device_printf(sc->tws_dev,  " ... Controller ready flag NOT found!\n");
1259}
1260
1261static void
1262tws_reinit(void *arg)
1263{
1264    struct tws_softc *sc = (struct tws_softc *)arg;
1265    int timeout_val=0;
1266    int try=2;
1267    int done=0;
1268
1269
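    /*
     * Poll for the controller ready bit in 5 second steps, sleeping on
     * gen_lock between polls.  Each time TWS_RESET_TIMEOUT worth of
     * waiting passes without the bit, the soft reset is asserted again;
     * after two such retries the reinit is abandoned.
     */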
1270//  device_printf(sc->tws_dev,  "Waiting for Controller Ready\n");
1271    while ( !done && try ) {
1272        if ( tws_ctlr_ready(sc) ) {
1273            done = 1;
1274            break;
1275        } else {
1276            timeout_val += 5;
1277            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
1278               timeout_val = 0;
1279               if ( try )
1280                   tws_assert_soft_reset(sc);
1281               try--;
1282            }
1283            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
1284        }
1285    }
1286
1287    if (!done) {
1288        device_printf(sc->tws_dev,  "FAILED to get Controller Ready!\n");
1289        return;
1290    }
1291
1292    sc->obfl_q_overrun = false;
1293//  device_printf(sc->tws_dev,  "Sending initConnect\n");
1294    if ( tws_init_connect(sc, tws_queue_depth) ) {
1295        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
1296    }
1297    tws_init_obfl_q(sc);
1298
1299    tws_turn_on_interrupts(sc);
1300
1301    wakeup_one(sc->chan);
1302}
1303
1304
1305static void
1306tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
1307{
1308    /* Only for IO commands */
1309    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1310        union ccb   *ccb = (union ccb *)(req->ccb_ptr);
1311
1312        xpt_freeze_simq(sc->sim, 1);
1313        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1314        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1315    }
1316}
1317
1318
1319TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);
1320