tws_cam.c revision 249468
/*
 * Copyright (c) 2010 LSI Corp.
 * All rights reserved.
 * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/tws/tws_cam.c 249468 2013-04-14 09:55:48Z mav $
 */

#include <dev/tws/tws.h>
#include <dev/tws/tws_services.h>
#include <dev/tws/tws_hdm.h>
#include <dev/tws/tws_user.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};

static void  tws_action(struct cam_sim *sim, union ccb *ccb);
static void  tws_poll(struct cam_sim *sim);
static void tws_scsi_complete(struct tws_request *req);



void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
int tws_bus_scan(struct tws_softc *sc);
int tws_cam_attach(struct tws_softc *sc);
void tws_cam_detach(struct tws_softc *sc);
void tws_reset(void *arg);

static void tws_reset_cb(void *arg);
static void tws_reinit(void *arg);
static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error);
static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
                            void *sgl_dest, u_int16_t num_sgl_entries);
static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
static void tws_scsi_err_complete(struct tws_request *req,
                                               struct tws_command_header *hdr);
static void tws_passthru_err_complete(struct tws_request *req,
                                               struct tws_command_header *hdr);


void tws_timeout(void *arg);
static void tws_intr_attn_aen(struct tws_softc *sc);
static void tws_intr_attn_error(struct tws_softc *sc);
static void tws_intr_resp(struct tws_softc *sc);
void tws_intr(void *arg);
void tws_cmd_complete(struct tws_request *req);
void tws_aen_complete(struct tws_request *req);
int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
void tws_getset_param_complete(struct tws_request *req);
int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data);
int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data);


extern struct tws_request *tws_get_request(struct tws_softc *sc,
                                            u_int16_t type);
extern void *tws_release_request(struct tws_request *req);
extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
extern boolean tws_get_response(struct tws_softc *sc,
                                           u_int16_t *req_id, u_int64_t *mfa);
extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
                                u_int8_t q_type );
extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
                                   struct tws_request *req, u_int8_t q_type );
extern void tws_send_event(struct tws_softc *sc, u_int8_t event);

extern struct tws_sense *
tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);

extern void tws_fetch_aen(void *arg);
extern void tws_disable_db_intr(struct tws_softc *sc);
extern void tws_enable_db_intr(struct tws_softc *sc);
extern void tws_passthru_complete(struct tws_request *req);
extern void tws_aen_synctime_with_host(struct tws_softc *sc);
extern void tws_circular_aenq_insert(struct tws_softc *sc,
                    struct tws_circular_q *cq, struct tws_event_packet *aen);
extern int tws_use_32bit_sgls;
extern boolean tws_ctlr_reset(struct tws_softc *sc);
extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
                                                           u_int8_t q_type );
extern void tws_turn_off_interrupts(struct tws_softc *sc);
extern void tws_turn_on_interrupts(struct tws_softc *sc);
extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
extern void tws_init_obfl_q(struct tws_softc *sc);
extern uint8_t tws_get_state(struct tws_softc *sc);
extern void tws_assert_soft_reset(struct tws_softc *sc);
extern boolean tws_ctlr_ready(struct tws_softc *sc);
extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
extern int tws_setup_intr(struct tws_softc *sc, int irqs);
extern int tws_teardown_intr(struct tws_softc *sc);


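/*
 * Attach to CAM: allocate a device queue sized to tws_cam_depth, allocate
 * and register a single SIM/bus for this controller, and create a wildcard
 * path for that bus.  Returns 0 on success or an errno value on failure.
 */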
int
tws_cam_attach(struct tws_softc *sc)
{
    struct cam_devq *devq;

    TWS_TRACE_DEBUG(sc, "entry", 0, sc);
    /* Create a device queue for sim */

    /*
     * If the user sets the cam depth to less than 1,
     * CAM may get confused.
     */
    if ( tws_cam_depth < 1 )
        tws_cam_depth = 1;
    if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS)  )
        tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;

    TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);

    if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
        tws_log(sc, CAM_SIMQ_ALLOC);
        return(ENOMEM);
    }

   /*
    * Create a SIM entry.  Though we can support tws_cam_depth
    * simultaneous requests, we claim to be able to handle only
    * tws_cam_depth, so that reserved request packets are always
    * available to service ioctls and internal commands.
    */
    sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
                      device_get_unit(sc->tws_dev),
#if (__FreeBSD_version >= 700000)
                      &sc->sim_lock,
#endif
                      tws_cam_depth, 1, devq);
                      /* 1, 1, devq); */
    if (sc->sim == NULL) {
        cam_simq_free(devq);
        tws_log(sc, CAM_SIM_ALLOC);
        return(ENOMEM); /* bail out; sc->sim is dereferenced below */
    }
    /* Register the bus. */
    mtx_lock(&sc->sim_lock);
    if (xpt_bus_register(sc->sim,
#if (__FreeBSD_version >= 700000)
                         sc->tws_dev,
#endif
                         0) != CAM_SUCCESS) {
        cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
        sc->sim = NULL; /* so cam_detach will not try to free it */
        mtx_unlock(&sc->sim_lock);
        tws_log(sc, TWS_XPT_BUS_REGISTER);
        return(ENXIO);
    }
    if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
                         CAM_TARGET_WILDCARD,
                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_bus_deregister(cam_sim_path(sc->sim));
        /* Passing TRUE to cam_sim_free will free the devq as well. */
        cam_sim_free(sc->sim, TRUE);
        tws_log(sc, TWS_XPT_CREATE_PATH);
        mtx_unlock(&sc->sim_lock);
        return(ENXIO);
    }
    mtx_unlock(&sc->sim_lock);

    return(0);
}

void
tws_cam_detach(struct tws_softc *sc)
{
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    mtx_lock(&sc->sim_lock);
    if (sc->path)
        xpt_free_path(sc->path);
    if (sc->sim) {
        xpt_bus_deregister(cam_sim_path(sc->sim));
        cam_sim_free(sc->sim, TRUE);
    }
    mtx_unlock(&sc->sim_lock);
}

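/*
 * Kick off a full bus rescan.  The wildcard target/lun path created here
 * covers every device on the SIM's bus, so xpt_rescan() (re)probes all
 * units exported by the controller.
 */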
int
tws_bus_scan(struct tws_softc *sc)
{
    union ccb       *ccb;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    if (!(sc->sim))
        return(ENXIO);
    ccb = xpt_alloc_ccb();
    mtx_lock(&sc->sim_lock);
    if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim),
                  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mtx_unlock(&sc->sim_lock);
        xpt_free_ccb(ccb);
        return(EIO);
    }
    xpt_rescan(ccb);
    mtx_unlock(&sc->sim_lock);
    return(0);
}

static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);


    switch( ccb->ccb_h.func_code ) {
        case XPT_SCSI_IO:
        {
            if ( tws_execute_scsi(sc, ccb) )
                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
            break;
        }
        case XPT_ABORT:
        {
            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);

            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

#if (__FreeBSD_version >= 700000 )
            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;

            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
            ccb->cts.valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
            ccb->cts.flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
#endif
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
                                          ccb->ccg.block_size);
            cam_calc_geometry(&ccb->ccg, 1/* extended */);
            xpt_done(ccb);

            break;
        }
        case XPT_PATH_INQ:
        {
            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
            ccb->cpi.base_transfer_speed = 6000000;
            strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
            strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
#if (__FreeBSD_version >= 700000 )
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            ccb->cpi.maxio = TWS_MAX_IO_SIZE;
#endif
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        default:
            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
    }
}

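/*
 * Normal completion path for a SCSI I/O request: pull the request off the
 * busy queue, cancel its timeout, unload the DMA map, complete the CCB back
 * to CAM under the sim lock, then return the request to the free queue.
 */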
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);


    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    mtx_lock(&sc->sim_lock);
    xpt_done(req->ccb_ptr);
    mtx_unlock(&sc->sim_lock);

    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

void
tws_getset_param_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);

    untimeout(tws_timeout, req, req->thandle);
    tws_unmap_request(sc, req);

    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;
}

void
tws_aen_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;
    struct tws_command_header *sense;
    struct tws_event_packet event;
    u_int16_t aen_code=0;

    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);

    untimeout(tws_timeout, req, req->thandle);
    tws_unmap_request(sc, req);

    sense = (struct tws_command_header *)req->data;

    TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
                                   sense->sense_data[2]);
    TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
                                   sense->status_block.res__severity);
    TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
                                   sense->status_block.error);
    TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
                                   sense->header_desc.size_sense);

    aen_code = sense->status_block.error;

    switch ( aen_code ) {
        case TWS_AEN_SYNC_TIME_WITH_HOST :
            tws_aen_synctime_with_host(sc);
            break;
        case TWS_AEN_QUEUE_EMPTY :
            break;
        default :
            bzero(&event, sizeof(struct tws_event_packet));
            event.sequence_id = sc->seq_id;
            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
            event.aen_code = sense->status_block.error;
            event.severity = sense->status_block.res__severity & 0x7;
            event.event_src = TWS_SRC_CTRL_EVENT;
            strcpy(event.severity_str, tws_sev_str[event.severity]);
            event.retrieved = TWS_AEN_NOT_RETRIEVED;

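            /*
             * err_specific_desc carries two consecutive NUL-terminated
             * strings: the event description followed by its source.
             * parameter_len is grown below to cover both strings so the
             * second one survives the copy into the event packet.
             */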
            bcopy(sense->err_specific_desc, event.parameter_data,
                                    TWS_ERROR_SPECIFIC_DESC_LEN);
            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;

            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
                                                event.parameter_len) + 1);
            }

            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
                event.severity_str,
                event.event_src,
                event.aen_code,
                event.parameter_data +
                     (strlen(event.parameter_data) + 1),
                event.parameter_data);

            mtx_lock(&sc->gen_lock);
            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
            sc->seq_id++;
            mtx_unlock(&sc->gen_lock);
            break;

    }

    free(req->data, M_TWS);

    req->state = TWS_REQ_STATE_FREE;

    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
        /* timeout(tws_fetch_aen, sc, 1);*/
        sc->stats.num_aens++;
        tws_fetch_aen((void *)sc);
    }
}

void
tws_cmd_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(sc, req);
}

static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{
    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    switch (req->type) {
        case TWS_REQ_TYPE_PASSTHRU :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_REQ_TYPE_GETSET_PARAM :
            tws_getset_param_complete(req);
            break;
        case TWS_REQ_TYPE_SCSI_IO :
            tws_scsi_err_complete(req, hdr);
            break;

    }

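    /*
     * Hand the error MFA (message frame address) back to the firmware by
     * writing its upper and lower 32 bits to the TWS_I2O0_HOBQPH/HOBQPL
     * registers.  TWS_BIT13 in the status register flags an OBFL
     * (outbound free list) overrun.
     */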
    mtx_lock(&sc->io_lock);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        device_printf(sc->tws_dev,  "OBFL Overrun\n");
        sc->obfl_q_overrun = true;
    }
    mtx_unlock(&sc->io_lock);
}

static void
tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
{
    u_int8_t *sense_data;
    struct tws_softc *sc = req->sc;
    union ccb *ccb = req->ccb_ptr;

    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
                                 req->cmd_pkt->cmd.pkt_a.status);
    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {

        if ( ccb->ccb_h.target_lun ) {
            TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
            ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
        } else {
            TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
            ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
        }

    } else {
        TWS_TRACE_DEBUG(sc, "scsi status  error",0,0);
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
              (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
            TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
        }
    }

    /* If no error bits were set above, mark the request as a completion error. */
    if (ccb->ccb_h.status == 0)
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;

    sense_data = (u_int8_t *)&ccb->csio.sense_data;
    if (sense_data) {
        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;

    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    mtx_lock(&sc->sim_lock);
    xpt_done(ccb);
    mtx_unlock(&sc->sim_lock);

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);
    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}

static void
tws_passthru_err_complete(struct tws_request *req,
                                          struct tws_command_header *hdr)
{
    TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
    req->error_code = hdr->status_block.error;
    memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
    tws_passthru_complete(req);
}

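/*
 * Controller reset helper: abort every request still on the busy queue.
 * Each CCB is flagged CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET so CAM retries
 * it once the controller comes back, and the request is returned to the
 * free queue.
 */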
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
    struct tws_request *req;
    union ccb          *ccb;
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->q_lock);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);
    while ( req ) {
        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
        untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);

        req->error_code = TWS_REQ_RET_RESET;
        ccb = (union ccb *)(req->ccb_ptr);

        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        ccb->ccb_h.status |=  CAM_REQUEUE_REQ;
        ccb->ccb_h.status |=  CAM_SCSI_BUS_RESET;

        tws_unmap_request(req->sc, req);

        mtx_lock(&sc->sim_lock);
        xpt_done(req->ccb_ptr);
        mtx_unlock(&sc->sim_lock);

        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }
}


static void
tws_drain_reserved_reqs(struct tws_softc *sc)
{
    struct tws_request *r;

    r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
        untimeout(tws_timeout, r, r->thandle);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }

    r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
    if ( r->state == TWS_REQ_STATE_BUSY ) {
        TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
        r->error_code = TWS_REQ_RET_RESET;
    }

    r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
        untimeout(tws_timeout, r, r->thandle);
        tws_unmap_request(sc, r);
        free(r->data, M_TWS);
        r->state = TWS_REQ_STATE_FREE;
        r->error_code = TWS_REQ_RET_RESET;
    }
}

static void
tws_drain_response_queue(struct tws_softc *sc)
{
    u_int16_t req_id;
    u_int64_t mfa;
    while ( tws_get_response(sc, &req_id, &mfa) );
}


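/*
 * Build and submit a SCSI I/O CCB: validate the target/lun, grab a free
 * request, record the transfer direction, fill in the command packet
 * (opcode, unit, packed lun/request id, CDB), arm the per-command timeout,
 * and hand the data buffer to bus_dma.  The dmamap callback fills in the
 * SG list and submits the command to the firmware.
 */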
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    mtx_assert(&sc->sim_lock, MA_OWNED);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "target id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request.  Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags |= TWS_DIR_IN;
        if(ccb_h->flags & CAM_DIR_OUT)
            req->flags |= TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_REQ_TYPE_SCSI_IO;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

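    /*
     * The 8-bit lun is split across two 16-bit fields: its low nibble goes
     * into bits 15:12 of lun_l4__req_id (bits 11:0 hold the request id) and
     * its high nibble into bits 15:12 of lun_h4__sgl_entries (the low bits
     * receive the SG entry count in the dmamap callback).
     */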
    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    req->data = ccb;
    req->flags |= TWS_DATA_CCB;
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    ccb_h->timeout_ch = timeout(tws_timeout, req, (ccb_h->timeout * hz)/1000);
    error = tws_map_request(sc, req);
    return(error);
}


int
tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    int error;

    TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
    req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);

    if ( req == NULL )
        return(ENOMEM);

    req->cb = tws_aen_complete;

    cmd_pkt = req->cmd_pkt;
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.unit = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;

    cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
    cmd_pkt->cmd.pkt_a.cdb[4] = 128;

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(ENOMEM);
    bzero(req->data, TWS_SECTOR_SIZE);
    req->flags = TWS_DIR_IN;

    req->thandle = timeout(tws_timeout, req, (TWS_IO_TIMEOUT * hz));
    error = tws_map_request(sc, req);
    return(error);

}

int
tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    int error;

    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(ENOMEM);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(ENOMEM);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = tws_getset_param_complete;
    req->flags = TWS_DIR_OUT;
    cmd_pkt = req->cmd_pkt;

    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode =
            BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to set. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;
    memcpy(param->data, data, param_size);

    req->thandle = timeout(tws_timeout, req, (TWS_IOCTL_TIMEOUT * hz));
    error = tws_map_request(sc, req);
    return(error);

}

int
tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
              u_int32_t param_size, void *data)
{
    struct tws_request *req;
    struct tws_command_packet *cmd_pkt;
    union tws_command_giga *cmd;
    struct tws_getset_param *param;
    u_int16_t reqid;
    u_int64_t mfa;
    int error = SUCCESS;


    req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
    if ( req == NULL ) {
        TWS_TRACE_DEBUG(sc, "null req", 0, 0);
        return(FAILURE);
    }

    req->length = TWS_SECTOR_SIZE;
    req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
    if ( req->data == NULL )
        return(FAILURE);
    bzero(req->data, TWS_SECTOR_SIZE);
    param = (struct tws_getset_param *)req->data;

    req->cb = NULL;
    req->flags = TWS_DIR_IN;
    cmd_pkt = req->cmd_pkt;

    cmd = &cmd_pkt->cmd.pkt_g;
    cmd->param.sgl_off__opcode =
            BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
    cmd->param.request_id = (u_int8_t)req->request_id;
    cmd->param.host_id__unit = 0;
    cmd->param.param_count = 1;
    cmd->param.size = 2; /* map routine will add sgls */

    /* Specify which parameter we want to get. */
    param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
    param->parameter_id = (u_int8_t)(param_id);
    param->parameter_size_bytes = (u_int16_t)param_size;

    error = tws_map_request(sc, req);
    if (!error) {
        reqid = tws_poll4_response(sc, &mfa);
        tws_unmap_request(sc, req);

        if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
            memcpy(data, param->data, param_size);
        } else {
            error = FAILURE;
        }
    }

    free(req->data, M_TWS);
    req->state = TWS_REQ_STATE_FREE;
    return(error);

}

void
tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
{
    if (req->data != NULL) {
        if ( req->flags & TWS_DIR_IN )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                                            BUS_DMASYNC_POSTREAD);
        if ( req->flags & TWS_DIR_OUT )
            bus_dmamap_sync(sc->data_tag, req->dma_map,
                                            BUS_DMASYNC_POSTWRITE);
        mtx_lock(&sc->io_lock);
        bus_dmamap_unload(sc->data_tag, req->dma_map);
        mtx_unlock(&sc->io_lock);
    }
}

int32_t
tws_map_request(struct tws_softc *sc, struct tws_request *req)
{
    int32_t error = 0;


    /* If the command involves data, map that too. */
    if (req->data != NULL) {
        int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);

        /*
         * Map the data buffer into bus space and build the SG list.
         */
        mtx_lock(&sc->io_lock);
        if (req->flags & TWS_DATA_CCB)
            error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
                                        req->data,
                                        tws_dmamap_data_load_cbfn, req,
                                        my_flags);
        else
            error = bus_dmamap_load(sc->data_tag, req->dma_map,
                                    req->data, req->length,
                                    tws_dmamap_data_load_cbfn, req,
                                    my_flags);
        mtx_unlock(&sc->io_lock);

        if (error == EINPROGRESS) {
            TWS_TRACE(sc, "in progress", 0, error);
            tws_freeze_simq(sc, req);
            error = 0;  /* EINPROGRESS is not a fatal error. */
        }
    } else { /* no data involved */
        error = tws_submit_command(sc, req);
    }
    return(error);
}


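/*
 * bus_dma callback: once the data buffer is mapped, sync the DMA map for
 * the transfer direction, copy the returned segments into the command
 * packet's SG list (the generic pkt_g layout for passthru and get/set-param
 * commands, the pkt_a layout for SCSI I/O), and submit the command to the
 * controller.
 */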
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;


    if ( error ) {
        TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
    }

    if ( error == EFBIG ) {
        TWS_TRACE(sc, "not enough data segs", 0, nseg);
        req->error_code = error;
        req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
        return;
    }

    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                            BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                        BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
             GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
                            TWS_FW_CMD_EXECUTE_SCSI) ||
              req->type == TWS_REQ_TYPE_GETSET_PARAM) {
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            gcmd->size += sgls *
                          ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
            tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);

        } else {
            tws_fill_sg_list(req->sc, (void *)segs,
                      (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
        }
    }


    req->error_code = tws_submit_command(req->sc, req);

}


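/*
 * Copy a bus_dma segment list into the controller's SG list format.  On
 * 64-bit capable controllers the destination entries are 64-bit unless
 * tws_use_32bit_sgls forces the 32-bit layout; in those paths the source
 * pointer is advanced by sizeof(bus_dma_segment_t) because the bus_dma
 * segment and SG descriptor layouts differ.
 */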
static void
tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
                          u_int16_t num_sgl_entries)
{
    int i;

    if ( sc->is64bit ) {
        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;

        if ( !tws_use_32bit_sgls ) {
            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_d[i].reserved = 0;
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                               sizeof(bus_dma_segment_t));
            }
        } else {
            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                               sizeof(bus_dma_segment_t));
            }
        }
    } else {
        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);


        for (i = 0; i < num_sgl_entries; i++) {
            sgl_d[i].address = sgl_s[i].address;
            sgl_d[i].length = sgl_s[i].length;
            sgl_d[i].flag = 0;
        }
    }
}


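/*
 * Interrupt handler.  TWS_BIT2 in the host interrupt status register
 * signals a doorbell: TWS_BIT21 in the inbound doorbell indicates a
 * micro-controller error and forces a reset, TWS_BIT18 indicates new AENs
 * to fetch.  TWS_BIT3 signals completed requests on the response queue.
 */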
void
tws_intr(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    u_int32_t histat=0, db=0;

    if (!(sc)) {
        printf("tws: null softc!!!\n"); /* can't use sc->tws_dev when sc is NULL */
        return;
    }

    if ( tws_get_state(sc) == TWS_RESET ) {
        return;
    }

    if ( tws_get_state(sc) != TWS_ONLINE ) {
        return;
    }

    sc->stats.num_intrs++;
    histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
    if ( histat & TWS_BIT2 ) {
        TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
        db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
        if ( db & TWS_BIT21 ) {
            tws_intr_attn_error(sc);
            return;
        }
        if ( db & TWS_BIT18 ) {
            tws_intr_attn_aen(sc);
        }
    }

    if ( histat & TWS_BIT3 ) {
        tws_intr_resp(sc);
    }
}

static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    u_int32_t db=0;

    /* mask off db intrs until all the aens are fetched */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen((void *)sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);

}

static void
tws_intr_attn_error(struct tws_softc *sc)
{
    u_int32_t db=0;

    TWS_TRACE(sc, "attn error", 0, 0);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}

static void
tws_intr_resp(struct tws_softc *sc)
{
    u_int16_t req_id;
    u_int64_t mfa;

    while ( tws_get_response(sc, &req_id, &mfa) ) {
        sc->stats.reqs_out++;
        if ( req_id == TWS_INVALID_REQID ) {
            TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
            sc->stats.reqs_errored++;
            tws_err_complete(sc, mfa);
            continue;
        }
        sc->reqs[req_id].cb(&sc->reqs[req_id]);
    }

}


static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr((void *) sc);
}

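/*
 * Per-command timeout handler.  A timed-out request triggers a full
 * controller reset: freeze the SIMQ, assert a soft reset with interrupts
 * torn down, drain the busy/reserved/response queues via tws_reset_cb(),
 * then re-initialize the controller in tws_reinit() and release the SIMQ.
 */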
void
tws_timeout(void *arg)
{
    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;


    if ( req->error_code == TWS_REQ_RET_RESET ) {
        return;
    }

    mtx_lock(&sc->gen_lock);
    if ( req->error_code == TWS_REQ_RET_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
        device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
    } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
        device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
    } else {
        device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
    }

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

//  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}

void
tws_reset(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;

    mtx_lock(&sc->gen_lock);
    if ( tws_get_state(sc) == TWS_RESET ) {
        mtx_unlock(&sc->gen_lock);
        return;
    }

    tws_teardown_intr(sc);
    xpt_freeze_simq(sc->sim, 1);

    tws_send_event(sc, TWS_RESET_START);

    device_printf(sc->tws_dev,  "Resetting controller\n");

    tws_assert_soft_reset(sc);
    tws_turn_off_interrupts(sc);
    tws_reset_cb( (void*) sc );
    tws_reinit( (void*) sc );

//  device_printf(sc->tws_dev,  "Controller Reset complete!\n");
    tws_send_event(sc, TWS_RESET_COMPLETE);
    mtx_unlock(&sc->gen_lock);

    xpt_release_simq(sc->sim, 1);
    tws_setup_intr(sc, sc->irqs);
}

static void
tws_reset_cb(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    time_t endt;
    int found = 0;
    u_int32_t reg;

    if ( tws_get_state(sc) != TWS_RESET ) {
        return;
    }

//  device_printf(sc->tws_dev,  "Draining Busy Queue\n");
    tws_drain_busy_queue(sc);
//  device_printf(sc->tws_dev,  "Draining Reserved Reqs\n");
    tws_drain_reserved_reqs(sc);
//  device_printf(sc->tws_dev,  "Draining Response Queue\n");
    tws_drain_response_queue(sc);

//  device_printf(sc->tws_dev,  "Looking for controller ready flag...\n");
    endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
    while ((TWS_LOCAL_TIME <= endt) && (!found)) {
        reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
        if ( reg & TWS_BIT13 ) {
            found = 1;
//          device_printf(sc->tws_dev,  " ... Got it!\n");
        }
    }
    if ( !found )
            device_printf(sc->tws_dev,  " ... Controller ready flag NOT found!\n");
}

static void
tws_reinit(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    int timeout_val=0;
    int try=2;
    int done=0;


//  device_printf(sc->tws_dev,  "Waiting for Controller Ready\n");
    while ( !done && try ) {
        if ( tws_ctlr_ready(sc) ) {
            done = 1;
            break;
        } else {
            timeout_val += 5;
            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
               timeout_val = 0;
               if ( try )
                   tws_assert_soft_reset(sc);
               try--;
            }
            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
        }
    }

    if (!done) {
        device_printf(sc->tws_dev,  "FAILED to get Controller Ready!\n");
        return;
    }

    sc->obfl_q_overrun = false;
//  device_printf(sc->tws_dev,  "Sending initConnect\n");
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);


    tws_turn_on_interrupts(sc);

    wakeup_one(sc->chan);
}


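/*
 * Called when a dmamap load is deferred (EINPROGRESS): freeze the SIMQ and
 * mark the CCB to be requeued, with CAM_RELEASE_SIMQ set so CAM releases
 * the queue again when this CCB completes.
 */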
1305tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
1306{
1307    /* Only for IO commands */
1308    if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1309        union ccb   *ccb = (union ccb *)(req->ccb_ptr);
1310
1311        xpt_freeze_simq(sc->sim, 1);
1312        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1313        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1314    }
1315}
1316
1317
TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);

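/*
 * tws_cam_depth can be overridden at boot time through the loader tunable
 * registered above.  For example (illustrative value only), /boot/loader.conf
 * could contain:
 *
 *     hw.tws.cam_depth="64"
 */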