/*-
 *       Copyright (c) 2000-04 ICP vortex GmbH
 *       Copyright (c) 2002-04 Intel Corporation
 *       Copyright (c) 2003-04 Adaptec Inc.
 *       All Rights Reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
 *
 * Written by: Achim Leubner <achim_leubner@adaptec.com>
 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
 *
 * credits:     Niklas Hallqvist;       OpenBSD driver for the ICP Controllers.
 *              Mike Smith;             Some driver source code.
 *              FreeBSD.ORG;            Great O/S to work on and for.
 *
 * $Id: iir.c 1.5 2004/03/30 10:17:53 achim Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/iir/iir.c 275982 2014-12-21 03:06:11Z smh $");

#define _IIR_C_

/* #include "opt_iir.h" */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/iir/iir.h>

static MALLOC_DEFINE(M_GDTBUF, "iirbuf", "iir driver buffer");

#ifdef GDT_DEBUG
int     gdt_debug = GDT_DEBUG;
#ifdef __SERIAL__
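/*
 * Debug-only output on a PC serial port: __COM2__ selects COM2 (0x2f8),
 * otherwise COM1 (0x3f8) is used.  The port is re-initialized for every
 * string and characters are written by polling the line status register.
 */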
#define MAX_SERBUF 160
static void ser_init(void);
static void ser_puts(char *str);
static void ser_putc(int c);
static char strbuf[MAX_SERBUF+1];
#ifdef __COM2__
#define COM_BASE 0x2f8
#else
#define COM_BASE 0x3f8
#endif
static void ser_init()
{
    unsigned port=COM_BASE;

    outb(port+3, 0x80);
    outb(port+1, 0);
    /* 19200 Baud, if 9600: outb(12,port) */
    outb(port, 6);
    outb(port+3, 3);
    outb(port+1, 0);
}

static void ser_puts(char *str)
{
    char *ptr;

    ser_init();
    for (ptr=str;*ptr;++ptr)
        ser_putc((int)(*ptr));
}

static void ser_putc(int c)
{
    unsigned port=COM_BASE;

    while ((inb(port+5) & 0x20)==0);
    outb(port, c);
    if (c==0x0a)
    {
        while ((inb(port+5) & 0x20)==0);
        outb(port, 0x0d);
    }
}

int ser_printf(const char *fmt, ...)
{
    va_list args;
    int i;

    va_start(args,fmt);
    i = vsprintf(strbuf,fmt,args);
    ser_puts(strbuf);
    va_end(args);
    return i;
}
#endif
#endif

/* controller cnt. */
int gdt_cnt = 0;
/* event buffer */
static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
static int elastidx, eoldidx;
static struct mtx elock;
MTX_SYSINIT(iir_elock, &elock, "iir events", MTX_DEF);
/* statistics */
gdt_statist_t gdt_stat;

/* Definitions for our use of the SIM private CCB area */
#define ccb_sim_ptr     spriv_ptr0
#define ccb_priority    spriv_field1

static void     iir_action(struct cam_sim *sim, union ccb *ccb);
static int	iir_intr_locked(struct gdt_softc *gdt);
static void     iir_poll(struct cam_sim *sim);
static void     iir_shutdown(void *arg, int howto);
static void     iir_timeout(void *arg);

static void     gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
                                 int *secs);
static int      gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
                                 u_int8_t service, u_int16_t opcode,
                                 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
static int      gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
                         int timeout);

static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);

static int      gdt_sync_event(struct gdt_softc *gdt, int service,
                               u_int8_t index, struct gdt_ccb *gccb);
static int      gdt_async_event(struct gdt_softc *gdt, int service);
static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
                                   union ccb *ccb);
static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
                                     union ccb *ccb);
static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
                                     gdt_ucmd_t *ucmd);
static void     gdt_internal_cache_cmd(struct gdt_softc *gdt, union ccb *ccb);

static void     gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
                          int nseg, int error);
static void     gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
                              int nseg, int error);

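/*
 * Bring a probed controller to a usable state: create the busdma tags and
 * the gccb pool, initialize the screen, cache and raw services, detect the
 * physical buses, negotiate scatter/gather support, fetch the OEM name and
 * scan the configured host drives.  Returns 0 on success, 1 on failure.
 */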
int
iir_init(struct gdt_softc *gdt)
{
    u_int16_t cdev_cnt;
    int i, id, drv_cyls, drv_hds, drv_secs;
    struct gdt_ccb *gccb;

    GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));

    gdt->sc_state = GDT_POLLING;
    gdt_clear_events();
    bzero(&gdt_stat, sizeof(gdt_statist_t));

    SLIST_INIT(&gdt->sc_free_gccb);
    SLIST_INIT(&gdt->sc_pending_gccb);
    TAILQ_INIT(&gdt->sc_ccb_queue);
    TAILQ_INIT(&gdt->sc_ucmd_queue);

    /* DMA tag for mapping buffers into device visible space. */
    if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
                           /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                           /*highaddr*/BUS_SPACE_MAXADDR,
                           /*filter*/NULL, /*filterarg*/NULL,
                           /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
                           /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
                           /*flags*/BUS_DMA_ALLOCNOW,
			   /*lockfunc*/busdma_lock_mutex,
			   /*lockarg*/&gdt->sc_lock,
                           &gdt->sc_buffer_dmat) != 0) {
	device_printf(gdt->sc_devnode,
	    "bus_dma_tag_create(..., gdt->sc_buffer_dmat) failed\n");
        return (1);
    }
    gdt->sc_init_level++;

    /* DMA tag for our ccb structures */
    if (bus_dma_tag_create(gdt->sc_parent_dmat,
			   /*alignment*/1,
			   /*boundary*/0,
                           /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                           /*highaddr*/BUS_SPACE_MAXADDR,
                           /*filter*/NULL,
			   /*filterarg*/NULL,
                           GDT_MAXCMDS * GDT_SCRATCH_SZ, /* maxsize */
                           /*nsegments*/1,
                           /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			   /*flags*/0, /*lockfunc*/busdma_lock_mutex,
			   /*lockarg*/&gdt->sc_lock,
			   &gdt->sc_gcscratch_dmat) != 0) {
        device_printf(gdt->sc_devnode,
	    "bus_dma_tag_create(...,gdt->sc_gcscratch_dmat) failed\n");
        return (1);
    }
    gdt->sc_init_level++;

    /* Allocation for our ccb scratch area */
    if (bus_dmamem_alloc(gdt->sc_gcscratch_dmat, (void **)&gdt->sc_gcscratch,
                         BUS_DMA_NOWAIT, &gdt->sc_gcscratch_dmamap) != 0) {
        device_printf(gdt->sc_devnode,
	    "bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n");
        return (1);
    }
    gdt->sc_init_level++;

    /* And permanently map them */
    bus_dmamap_load(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap,
                    gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ,
                    gdtmapmem, &gdt->sc_gcscratch_busbase, /*flags*/0);
    gdt->sc_init_level++;

    /* Clear them out. */
    bzero(gdt->sc_gcscratch, GDT_MAXCMDS * GDT_SCRATCH_SZ);

    /* Initialize the ccbs */
    gdt->sc_gccbs = malloc(sizeof(struct gdt_ccb) * GDT_MAXCMDS, M_GDTBUF,
        M_NOWAIT | M_ZERO);
    if (gdt->sc_gccbs == NULL) {
        device_printf(gdt->sc_devnode, "no memory for gccbs.\n");
        return (1);
    }
    for (i = GDT_MAXCMDS-1; i >= 0; i--) {
        gccb = &gdt->sc_gccbs[i];
        gccb->gc_cmd_index = i + 2;
        gccb->gc_flags = GDT_GCF_UNUSED;
        gccb->gc_map_flag = FALSE;
        if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
                              &gccb->gc_dmamap) != 0)
            return(1);
        gccb->gc_map_flag = TRUE;
        gccb->gc_scratch = &gdt->sc_gcscratch[GDT_SCRATCH_SZ * i];
        gccb->gc_scratch_busbase = gdt->sc_gcscratch_busbase + GDT_SCRATCH_SZ * i;
	callout_init_mtx(&gccb->gc_timeout, &gdt->sc_lock, 0);
        SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
    }
    gdt->sc_init_level++;

    /* create the control device */
    gdt->sc_dev = gdt_make_dev(gdt);

    /* allocate ccb for gdt_internal_cmd() */
    mtx_lock(&gdt->sc_lock);
    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
	mtx_unlock(&gdt->sc_lock);
        device_printf(gdt->sc_devnode, "No free command index found\n");
        return (1);
    }
    bzero(gccb->gc_cmd, GDT_CMD_SZ);

    if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
                          0, 0, 0)) {
        device_printf(gdt->sc_devnode,
	    "Screen service initialization error %d\n", gdt->sc_status);
        gdt_free_ccb(gdt, gccb);
	mtx_unlock(&gdt->sc_lock);
        return (1);
    }

    gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
                     0, 0, 0);

    if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
                          GDT_LINUX_OS, 0, 0)) {
        device_printf(gdt->sc_devnode, "Cache service initialization error %d\n",
               gdt->sc_status);
        gdt_free_ccb(gdt, gccb);
	mtx_unlock(&gdt->sc_lock);
        return (1);
    }
    cdev_cnt = (u_int16_t)gdt->sc_info;
    gdt->sc_fw_vers = gdt->sc_service;

    /* Detect number of buses */
    gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
    gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
    gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
    gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
    gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                         GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
                         GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
        gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
        for (i = 0; i < gdt->sc_bus_cnt; i++) {
            id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
                                 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
            gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
        }
    } else {
        /* New method failed, use fallback. */
        for (i = 0; i < GDT_MAXBUS; i++) {
            gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
            if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                                  GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
                                  GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
                                  GDT_GETCH_SZ)) {
                if (i == 0) {
                    device_printf(gdt->sc_devnode, "Cannot get channel count, "
                           "error %d\n", gdt->sc_status);
                    gdt_free_ccb(gdt, gccb);
		    mtx_unlock(&gdt->sc_lock);
                    return (1);
                }
                break;
            }
            gdt->sc_bus_id[i] =
                (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
                gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
        }
        gdt->sc_bus_cnt = i;
    }
    /* add one "virtual" channel for the host drives */
    gdt->sc_virt_bus = gdt->sc_bus_cnt;
    gdt->sc_bus_cnt++;

    if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
                          0, 0, 0)) {
            device_printf(gdt->sc_devnode,
		"Raw service initialization error %d\n", gdt->sc_status);
            gdt_free_ccb(gdt, gccb);
	    mtx_unlock(&gdt->sc_lock);
            return (1);
    }

    /* Set/get features raw service (scatter/gather) */
    gdt->sc_raw_feat = 0;
    if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
                         GDT_SCATTER_GATHER, 0, 0)) {
        if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
                             0, 0, 0)) {
            gdt->sc_raw_feat = gdt->sc_info;
            if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
                panic("%s: Scatter/Gather Raw Service "
		    "required but not supported!\n",
		    device_get_nameunit(gdt->sc_devnode));
                gdt_free_ccb(gdt, gccb);
		mtx_unlock(&gdt->sc_lock);
                return (1);
            }
        }
    }

    /* Set/get features cache service (scatter/gather) */
    gdt->sc_cache_feat = 0;
    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
                         0, GDT_SCATTER_GATHER, 0)) {
        if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
                             0, 0, 0)) {
            gdt->sc_cache_feat = gdt->sc_info;
            if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
                panic("%s: Scatter/Gather Cache Service "
		    "required but not supported!\n",
		    device_get_nameunit(gdt->sc_devnode));
                gdt_free_ccb(gdt, gccb);
		mtx_unlock(&gdt->sc_lock);
                return (1);
            }
        }
    }

    /* OEM */
    gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
    gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
    if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
                         GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
                         sizeof(gdt_oem_str_record_t))) {
        strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
            gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
        gdt->oem_name[7]='\0';
    } else {
        /* Old method, based on PCI ID */
        if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
            strcpy(gdt->oem_name,"Intel  ");
        else
            strcpy(gdt->oem_name,"ICP    ");
    }

    /* Scan for cache devices */
    for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
        if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
                             i, 0, 0)) {
            gdt->sc_hdr[i].hd_present = 1;
            gdt->sc_hdr[i].hd_size = gdt->sc_info;

            /*
             * Evaluate mapping (sectors per head, heads per cyl)
             */
            gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
            if (gdt->sc_info2 == 0)
                gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
                                 &drv_cyls, &drv_hds, &drv_secs);
            else {
                drv_hds = gdt->sc_info2 & 0xff;
                drv_secs = (gdt->sc_info2 >> 8) & 0xff;
                drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
                    drv_secs;
            }
            gdt->sc_hdr[i].hd_heads = drv_hds;
            gdt->sc_hdr[i].hd_secs = drv_secs;
            /* Round the size */
            gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;

            if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
                                 GDT_DEVTYPE, i, 0, 0))
                gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
        }
    }

    GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
                             gdt->sc_dpmembase,
                             gdt->sc_bus_cnt, cdev_cnt,
                             cdev_cnt == 1 ? "" : "s"));
    gdt_free_ccb(gdt, gccb);
    mtx_unlock(&gdt->sc_lock);

    atomic_add_int(&gdt_cnt, 1);
    return (0);
}

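/*
 * Release the resources acquired in iir_init().  sc_init_level records how
 * far initialization got, and the switch falls through from the highest
 * level reached down to 0 so only what was actually allocated is freed.
 */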
void
iir_free(struct gdt_softc *gdt)
{
    int i;

    GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));

    switch (gdt->sc_init_level) {
      default:
        gdt_destroy_dev(gdt->sc_dev);
      case 5:
        for (i = GDT_MAXCMDS-1; i >= 0; i--)
            if (gdt->sc_gccbs[i].gc_map_flag) {
		callout_drain(&gdt->sc_gccbs[i].gc_timeout);
                bus_dmamap_destroy(gdt->sc_buffer_dmat,
                                   gdt->sc_gccbs[i].gc_dmamap);
	    }
        bus_dmamap_unload(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch_dmamap);
        free(gdt->sc_gccbs, M_GDTBUF);
      case 4:
        bus_dmamem_free(gdt->sc_gcscratch_dmat, gdt->sc_gcscratch, gdt->sc_gcscratch_dmamap);
      case 3:
        bus_dma_tag_destroy(gdt->sc_gcscratch_dmat);
      case 2:
        bus_dma_tag_destroy(gdt->sc_buffer_dmat);
      case 1:
        bus_dma_tag_destroy(gdt->sc_parent_dmat);
      case 0:
        break;
    }
}

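/*
 * Register with CAM: allocate one shared device queue and one SIM per
 * detected bus (including the virtual host-drive bus), and install a
 * shutdown_final handler that flushes the host drives (iir_shutdown()).
 */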
void
iir_attach(struct gdt_softc *gdt)
{
    struct cam_devq *devq;
    int i;

    GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));

    /*
     * Create the device queue for our SIM.
     * XXX Throttle this down since the card has problems under load.
     */
    devq = cam_simq_alloc(32);
    if (devq == NULL)
        return;

    for (i = 0; i < gdt->sc_bus_cnt; i++) {
        /*
         * Construct our SIM entry
         */
        gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
	    gdt, device_get_unit(gdt->sc_devnode), &gdt->sc_lock,
	    /*untagged*/1, /*tagged*/GDT_MAXCMDS, devq);
	mtx_lock(&gdt->sc_lock);
        if (xpt_bus_register(gdt->sims[i], gdt->sc_devnode, i) != CAM_SUCCESS) {
            cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
	    mtx_unlock(&gdt->sc_lock);
            break;
        }

        if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
                            cam_sim_path(gdt->sims[i]),
                            CAM_TARGET_WILDCARD,
                            CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
            xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
            cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
	    mtx_unlock(&gdt->sc_lock);
            break;
        }
	mtx_unlock(&gdt->sc_lock);
    }
    if (i > 0)
        EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
                              gdt, SHUTDOWN_PRI_DEFAULT);
    /* iir_watchdog(gdt); */
    gdt->sc_state = GDT_NORMAL;
}

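/*
 * Choose a logical disk geometry for a host drive of the given size (in
 * sectors): 64 heads / 32 sectors for small drives, 127 / 63 for medium
 * ones, and the GDT_BIG* values beyond that, keeping the cylinder count
 * below GDT_MAXCYLS.
 */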
static void
gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
{
    *cyls = size / GDT_HEADS / GDT_SECS;
    if (*cyls < GDT_MAXCYLS) {
        *heads = GDT_HEADS;
        *secs = GDT_SECS;
    } else {
        /* Too high for 64 * 32 */
        *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
        if (*cyls < GDT_MAXCYLS) {
            *heads = GDT_MEDHEADS;
            *secs = GDT_MEDSECS;
        } else {
            /* Too high for 127 * 63 */
            *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
            *heads = GDT_BIGHEADS;
            *secs = GDT_BIGSECS;
        }
    }
}

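/*
 * Poll for completion of the given command: call iir_intr_locked() until
 * it reports this gccb's command index or the timeout expires (one
 * DELAY(1) per iteration).  Returns 1 on completion, 0 on timeout.
 */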
static int
gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
         int timeout)
{
    int rv = 0;

    GDT_DPRINTF(GDT_D_INIT,
                ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));

    gdt->sc_state |= GDT_POLL_WAIT;
    do {
        if (iir_intr_locked(gdt) == gccb->gc_cmd_index) {
            rv = 1;
            break;
        }
        DELAY(1);
    } while (--timeout);
    gdt->sc_state &= ~GDT_POLL_WAIT;

    while (gdt->sc_test_busy(gdt))
        DELAY(1);               /* XXX correct? */

    return (rv);
}

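/*
 * Build and issue a single polled command for the screen, cache or raw
 * service; used for the driver's own setup requests in iir_init().  The
 * command is copied into DPMEM and the result is collected via gdt_wait();
 * a GDT_S_BSY status is retried up to GDT_RETRIES times.  Returns non-zero
 * if the final status is GDT_S_OK.
 */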
static int
gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
                 u_int8_t service, u_int16_t opcode,
                 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
    int retries;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
                            gdt, service, opcode, arg1, arg2, arg3));

    bzero(gccb->gc_cmd, GDT_CMD_SZ);

    for (retries = GDT_RETRIES; ; ) {
        gccb->gc_service = service;
        gccb->gc_flags = GDT_GCF_INTERNAL;

        gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
                  gccb->gc_cmd_index);
        gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);

        switch (service) {
          case GDT_CACHESERVICE:
            if (opcode == GDT_IOCTL) {
                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_SUBFUNC, arg1);
                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_CHANNEL, arg2);
                gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
                          GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                          gccb->gc_scratch_busbase);
            } else {
                gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION +
                          GDT_CACHE_DEVICENO, (u_int16_t)arg1);
                gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                          GDT_CACHE_BLOCKNO, arg2);
            }
            break;

          case GDT_SCSIRAWSERVICE:
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION +
                      GDT_RAW_DIRECTION, arg1);
            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
                (u_int8_t)arg2;
            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
                (u_int8_t)arg3;
            gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
                (u_int8_t)(arg3 >> 8);
        }

        gdt->sc_set_sema0(gdt);
        gccb->gc_cmd_len = GDT_CMD_SZ;
        gdt->sc_cmd_off = 0;
        gdt->sc_cmd_cnt = 0;
        gdt->sc_copy_cmd(gdt, gccb);
        gdt->sc_release_event(gdt);
        DELAY(20);
        if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
            return (0);
        if (gdt->sc_status != GDT_S_BSY || --retries == 0)
            break;
        DELAY(1);
    }
    return (gdt->sc_status == GDT_S_OK);
}

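/*
 * Take a command control block from the free list and move it to the
 * pending list; returns NULL if none is available.  The caller must hold
 * sc_lock.
 */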
static struct gdt_ccb *
gdt_get_ccb(struct gdt_softc *gdt)
{
    struct gdt_ccb *gccb;

    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));

    mtx_assert(&gdt->sc_lock, MA_OWNED);
    gccb = SLIST_FIRST(&gdt->sc_free_gccb);
    if (gccb != NULL) {
        SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
        SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
        ++gdt_stat.cmd_index_act;
        if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
            gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
    }
    return (gccb);
}

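/*
 * Return a command control block to the free list.  During shutdown the
 * flush code sleeps on pending gccbs, so wake it up here.
 */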
void
gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
{

    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));

    mtx_assert(&gdt->sc_lock, MA_OWNED);
    gccb->gc_flags = GDT_GCF_UNUSED;
    SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
    SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
    --gdt_stat.cmd_index_act;
    if (gdt->sc_state & GDT_SHUTDOWN)
        wakeup(gccb);
}

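/*
 * Main dispatch loop: drain the user-command (IOCTL) queue and the CAM CCB
 * queue, handing requests to gdt_ioctl_cmd(), gdt_raw_cmd(),
 * gdt_cache_cmd() or gdt_internal_cache_cmd().  Requests that do not fit
 * into DPMEM or cannot get a command index are re-queued.  In polling mode
 * only one command is issued and then waited for.
 */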
void
gdt_next(struct gdt_softc *gdt)
{
    union ccb *ccb;
    gdt_ucmd_t *ucmd;
    struct cam_sim *sim;
    int bus, target, lun;
    int next_cmd;

    struct ccb_scsiio *csio;
    struct ccb_hdr *ccbh;
    struct gdt_ccb *gccb = NULL;
    u_int8_t cmd;

    GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));

    mtx_assert(&gdt->sc_lock, MA_OWNED);
    if (gdt->sc_test_busy(gdt)) {
        if (!(gdt->sc_state & GDT_POLLING)) {
            return;
        }
        while (gdt->sc_test_busy(gdt))
            DELAY(1);
    }

    gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
    next_cmd = TRUE;
    for (;;) {
        /* I/Os in queue? controller ready? */
        if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
            !TAILQ_FIRST(&gdt->sc_ccb_queue))
            break;

        /* 1.: I/Os without ccb (IOCTLs) */
        ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
        if (ucmd != NULL) {
            TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
            if ((gccb = gdt_ioctl_cmd(gdt, ucmd)) == NULL) {
                TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
                break;
            }
            break;
            /* if multiple commands were allowed: if (!gdt_polling) continue; */
        }

        /* 2.: I/Os with ccb */
        ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
        /* always non-NULL here, tested above */
        sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
        bus = cam_sim_bus(sim);
        target = ccb->ccb_h.target_id;
        lun = ccb->ccb_h.target_lun;

        TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
        --gdt_stat.req_queue_act;
        /* ccb->ccb_h.func_code is XPT_SCSI_IO */
        GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
                                  ccb->ccb_h.flags));
        csio = &ccb->csio;
        ccbh = &ccb->ccb_h;
        cmd  = csio->cdb_io.cdb_bytes[0];
        /* Max CDB length is 12 bytes */
        if (csio->cdb_len > 12) {
            ccbh->status = CAM_REQ_INVALID;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
        } else if (bus != gdt->sc_virt_bus) {
            /* raw service command */
            if ((gccb = gdt_raw_cmd(gdt, ccb)) == NULL) {
                TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
                                  sim_links.tqe);
                ++gdt_stat.req_queue_act;
                if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                    gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                next_cmd = FALSE;
            }
        } else if (target >= GDT_MAX_HDRIVES ||
                   !gdt->sc_hdr[target].hd_present || lun != 0) {
            ccbh->status = CAM_DEV_NOT_THERE;
            --gdt_stat.io_count_act;
            xpt_done(ccb);
        } else {
            /* cache service command */
            if (cmd == READ_6  || cmd == WRITE_6 ||
                cmd == READ_10 || cmd == WRITE_10) {
                if ((gccb = gdt_cache_cmd(gdt, ccb)) == NULL) {
                    TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
                                      sim_links.tqe);
                    ++gdt_stat.req_queue_act;
                    if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
                        gdt_stat.req_queue_max = gdt_stat.req_queue_act;
                    next_cmd = FALSE;
                }
            } else {
                gdt_internal_cache_cmd(gdt, ccb);
            }
        }
        if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
            break;
    }
    if (gdt->sc_cmd_cnt > 0)
        gdt->sc_release_event(gdt);

    if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
        gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
    }
}

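/*
 * Build a raw (pass-through) SCSI command for a device on a physical bus.
 * Returns NULL if the command would not fit into the remaining DPMEM
 * window or no command index is free; the data buffer is mapped with
 * bus_dmamap_load_ccb() and the command is completed in gdtexecuteccb().
 */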
static struct gdt_ccb *
gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb)
{
    struct gdt_ccb *gccb;
    struct cam_sim *sim;
    int error;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));

    if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_raw_cmd(): DPMEM overflow\n",
		device_get_nameunit(gdt->sc_devnode)));
        return (NULL);
    }

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_INVALID, ("%s: No free command index found\n",
		device_get_nameunit(gdt->sc_devnode)));
        return (gccb);
    }
    bzero(gccb->gc_cmd, GDT_CMD_SZ);
    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
    gccb->gc_ccb = ccb;
    gccb->gc_service = GDT_SCSIRAWSERVICE;
    gccb->gc_flags = GDT_GCF_SCSI;

    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);

    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
              (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
              GDT_DATA_IN : GDT_DATA_OUT);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
              ccb->csio.dxfer_len);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
              ccb->csio.cdb_len);
    bcopy(ccb->csio.cdb_io.cdb_bytes, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
          ccb->csio.cdb_len);
    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
        ccb->ccb_h.target_id;
    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
        ccb->ccb_h.target_lun;
    gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
        cam_sim_bus(sim);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
              sizeof(struct scsi_sense_data));
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
              gccb->gc_scratch_busbase);

    error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
			        gccb->gc_dmamap,
			        ccb,
			        gdtexecuteccb,
			        gccb, /*flags*/0);
    if (error == EINPROGRESS) {
        xpt_freeze_simq(sim, 1);
        gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
    }

    return (gccb);
}

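/*
 * Build a cache service read/write command for a host drive from a
 * READ_6/WRITE_6 or READ_10/WRITE_10 CDB.  During shutdown, writes are
 * turned into write-through requests.  Returns NULL on DPMEM overflow or
 * when no command index is free.
 */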
static struct gdt_ccb *
gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb)
{
    struct gdt_ccb *gccb;
    struct cam_sim *sim;
    u_int8_t *cmdp;
    u_int16_t opcode;
    u_int32_t blockno, blockcnt;
    int error;

    GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));

    if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
        gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_cache_cmd(): DPMEM overflow\n",
		device_get_nameunit(gdt->sc_devnode)));
        return (NULL);
    }

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
		device_get_nameunit(gdt->sc_devnode)));
        return (gccb);
    }
    bzero(gccb->gc_cmd, GDT_CMD_SZ);
    sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
    gccb->gc_ccb = ccb;
    gccb->gc_service = GDT_CACHESERVICE;
    gccb->gc_flags = GDT_GCF_SCSI;

    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    cmdp = ccb->csio.cdb_io.cdb_bytes;
    opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
    if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
        opcode = GDT_WRITE_THR;
    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, opcode);

    gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
              ccb->ccb_h.target_id);
    if (ccb->csio.cdb_len == 6) {
        struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
        blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
        blockcnt = rw->length ? rw->length : 0x100;
    } else {
        struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
        blockno = scsi_4btoul(rw->addr);
        blockcnt = scsi_2btoul(rw->length);
    }
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
              blockno);
    gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
              blockcnt);

    error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
                                gccb->gc_dmamap,
                                ccb,
                                gdtexecuteccb,
                                gccb, /*flags*/0);
    if (error == EINPROGRESS) {
        xpt_freeze_simq(sim, 1);
        gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
    }
    return (gccb);
}

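/*
 * Build a controller command for a queued user request (gdt_ucmd_t).  The
 * user data is staged in the per-command scratch buffer, so the request is
 * rejected if it does not fit into GDT_SCRATCH_SZ; the command is copied
 * to the controller directly, without a busdma callback.
 */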
static struct gdt_ccb *
gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd)
{
    struct gdt_ccb *gccb;
    u_int32_t cnt;

    GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));

    gccb = gdt_get_ccb(gdt);
    if (gccb == NULL) {
        GDT_DPRINTF(GDT_D_DEBUG, ("%s: No free command index found\n",
		device_get_nameunit(gdt->sc_devnode)));
        return (gccb);
    }
    bzero(gccb->gc_cmd, GDT_CMD_SZ);
    gccb->gc_ucmd = ucmd;
    gccb->gc_service = ucmd->service;
    gccb->gc_flags = GDT_GCF_IOCTL;

    /* check DPMEM space, copy data buffer from user space */
    if (ucmd->service == GDT_CACHESERVICE) {
        if (ucmd->OpCode == GDT_IOCTL) {
            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
                                      sizeof(u_int32_t));
            cnt = ucmd->u.ioctl.param_size;
            if (cnt > GDT_SCRATCH_SZ) {
                device_printf(gdt->sc_devnode,
		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
                gdt_free_ccb(gdt, gccb);
                return (NULL);
            }
        } else {
            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                      GDT_SG_SZ, sizeof(u_int32_t));
            cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
            if (cnt > GDT_SCRATCH_SZ) {
                device_printf(gdt->sc_devnode,
		    "Scratch buffer too small (%d/%d)\n", GDT_SCRATCH_SZ, cnt);
                gdt_free_ccb(gdt, gccb);
                return (NULL);
            }
        }
    } else {
        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                  GDT_SG_SZ, sizeof(u_int32_t));
        cnt = ucmd->u.raw.sdlen;
        if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
            device_printf(gdt->sc_devnode, "Scratch buffer too small (%d/%d)\n",
		GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
            gdt_free_ccb(gdt, gccb);
            return (NULL);
        }
    }
    if (cnt != 0)
        bcopy(ucmd->data, gccb->gc_scratch, cnt);

    if (gdt->sc_cmd_off + gccb->gc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
        gdt->sc_ic_all_size) {
        GDT_DPRINTF(GDT_D_INVALID, ("%s: gdt_ioctl_cmd(): DPMEM overflow\n",
		device_get_nameunit(gdt->sc_devnode)));
        gdt_free_ccb(gdt, gccb);
        return (NULL);
    }

    if (gdt->sc_cmd_cnt == 0)
        gdt->sc_set_sema0(gdt);

    /* fill cmd structure */
    gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
              gccb->gc_cmd_index);
    gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE,
              ucmd->OpCode);

    if (ucmd->service == GDT_CACHESERVICE) {
        if (ucmd->OpCode == GDT_IOCTL) {
            /* IOCTL */
            gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
                      ucmd->u.ioctl.param_size);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
                      ucmd->u.ioctl.subfunc);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
                      ucmd->u.ioctl.channel);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
                      gccb->gc_scratch_busbase);
        } else {
            /* cache service command */
            gdt_enc16(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
                      ucmd->u.cache.DeviceNo);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
                      ucmd->u.cache.BlockNo);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
                      ucmd->u.cache.BlockCnt);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
                      0xffffffffUL);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
                      1);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      GDT_SG_PTR, gccb->gc_scratch_busbase);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
        }
    } else {
        /* raw service command */
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
                  ucmd->u.raw.direction);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
                  0xffffffffUL);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
                  ucmd->u.raw.sdlen);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
                  ucmd->u.raw.clen);
        bcopy(ucmd->u.raw.cmd, gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
              12);
        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
            ucmd->u.raw.target;
        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
            ucmd->u.raw.lun;
        gccb->gc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
            ucmd->u.raw.bus;
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
                  ucmd->u.raw.sense_len);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
                  gccb->gc_scratch_busbase + ucmd->u.raw.sdlen);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
                  1);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                  GDT_SG_PTR, gccb->gc_scratch_busbase);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                  GDT_SG_LEN, ucmd->u.raw.sdlen);
    }

    gdt_stat.sg_count_act = 1;
    gdt->sc_copy_cmd(gdt, gccb);
    return (gccb);
}

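/*
 * Emulate a handful of SCSI commands (TEST UNIT READY, INQUIRY,
 * MODE SENSE(6), READ CAPACITY, ...) for the virtual host-drive bus
 * directly in the driver and complete the CCB without involving the
 * controller.
 */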
static void
gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
{
    int t;

    t = ccb->ccb_h.target_id;
    GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
        gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));

    switch (ccb->csio.cdb_io.cdb_bytes[0]) {
      case TEST_UNIT_READY:
      case START_STOP:
        break;
      case REQUEST_SENSE:
        GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
        break;
      case INQUIRY:
        {
            struct scsi_inquiry_data inq;
            size_t copylen = MIN(sizeof(inq), ccb->csio.dxfer_len);

            bzero(&inq, sizeof(inq));
            inq.device = (gdt->sc_hdr[t].hd_devtype & 4) ?
                T_CDROM : T_DIRECT;
            inq.dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
            inq.version = SCSI_REV_2;
            inq.response_format = 2;
            inq.additional_length = 32;
            inq.flags = SID_CmdQue | SID_Sync;
            strncpy(inq.vendor, gdt->oem_name, sizeof(inq.vendor));
            snprintf(inq.product, sizeof(inq.product),
                     "Host Drive   #%02d", t);
            strncpy(inq.revision, "   ", sizeof(inq.revision));
            bcopy(&inq, ccb->csio.data_ptr, copylen );
            if( ccb->csio.dxfer_len > copylen )
                bzero( ccb->csio.data_ptr+copylen,
                       ccb->csio.dxfer_len - copylen );
            break;
        }
      case MODE_SENSE_6:
        {
            struct mpd_data {
                struct scsi_mode_hdr_6 hd;
                struct scsi_mode_block_descr bd;
                struct scsi_control_page cp;
            } mpd;
            size_t copylen = MIN(sizeof(mpd), ccb->csio.dxfer_len);
            u_int8_t page;

            /*mpd = (struct mpd_data *)ccb->csio.data_ptr;*/
            bzero(&mpd, sizeof(mpd));
            mpd.hd.datalen = sizeof(struct scsi_mode_hdr_6) +
                sizeof(struct scsi_mode_block_descr);
            mpd.hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
            mpd.hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
            mpd.bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
            mpd.bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
            mpd.bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);

            bcopy(&mpd, ccb->csio.data_ptr, copylen );
            if( ccb->csio.dxfer_len > copylen )
                bzero( ccb->csio.data_ptr+copylen,
                       ccb->csio.dxfer_len - copylen );
            page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
            switch (page) {
              default:
                GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
                break;
            }
            break;
        }
      case READ_CAPACITY:
        {
            struct scsi_read_capacity_data rcd;
            size_t copylen = MIN(sizeof(rcd), ccb->csio.dxfer_len);

            /*rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;*/
            bzero(&rcd, sizeof(rcd));
            scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd.addr);
            scsi_ulto4b(GDT_SECTOR_SIZE, rcd.length);
            bcopy(&rcd, ccb->csio.data_ptr, copylen );
            if( ccb->csio.dxfer_len > copylen )
                bzero( ccb->csio.data_ptr+copylen,
                       ccb->csio.dxfer_len - copylen );
            break;
        }
      default:
        GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
                                    ccb->csio.cdb_io.cdb_bytes[0]));
        break;
    }
    ccb->ccb_h.status |= CAM_REQ_CMP;
    --gdt_stat.io_count_act;
    xpt_done(ccb);
}

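/*
 * bus_dmamap_load() callback for the scratch area: record the bus address
 * of the single segment.
 */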
static void
gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    bus_addr_t *busaddrp;

    busaddrp = (bus_addr_t *)arg;
    *busaddrp = dm_segs->ds_addr;
}

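/*
 * bus_dmamap_load_ccb() callback for raw and cache commands: copy the DMA
 * segments into the command's scatter/gather list, sync the data buffer,
 * arm the per-command timeout and copy the finished command to the
 * controller.
 */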
static void
gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    struct gdt_ccb *gccb;
    union ccb *ccb;
    struct gdt_softc *gdt;
    int i;

    gccb = (struct gdt_ccb *)arg;
    ccb = gccb->gc_ccb;
    gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
    mtx_assert(&gdt->sc_lock, MA_OWNED);

    GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
                            gdt, gccb, dm_segs, nseg, error));
    gdt_stat.sg_count_act = nseg;
    if (nseg > gdt_stat.sg_count_max)
        gdt_stat.sg_count_max = nseg;

    /* Copy the segments into our SG list */
    if (gccb->gc_service == GDT_CACHESERVICE) {
        for (i = 0; i < nseg; ++i) {
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
            dm_segs++;
        }
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
                  nseg);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
                  0xffffffffUL);

        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
    } else {
        for (i = 0; i < nseg; ++i) {
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
                      i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
            dm_segs++;
        }
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
                  nseg);
        gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
                  0xffffffffUL);

        gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
                                  nseg * GDT_SG_SZ, sizeof(u_int32_t));
    }

    if (nseg != 0) {
        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
            (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
            BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
    }

    /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
     * because command semaphore is already set!
     */

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    /* timeout handling */
    callout_reset_sbt(&gccb->gc_timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
      iir_timeout, (caddr_t)gccb, 0);

    gdt->sc_copy_cmd(gdt, gccb);
}

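/*
 * CAM action entry point: XPT_SCSI_IO requests are queued for gdt_next();
 * the remaining CCB function codes (transfer settings, geometry, path
 * inquiry, resets) are answered directly.
 */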
static void
iir_action( struct cam_sim *sim, union ccb *ccb )
{
    struct gdt_softc *gdt;
    int bus, target, lun;

    gdt = (struct gdt_softc *)cam_sim_softc( sim );
    mtx_assert(&gdt->sc_lock, MA_OWNED);
    ccb->ccb_h.ccb_sim_ptr = sim;
    bus = cam_sim_bus(sim);
    target = ccb->ccb_h.target_id;
    lun = ccb->ccb_h.target_lun;
    GDT_DPRINTF(GDT_D_CMD,
                ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
                 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
                 bus, target, lun));
    ++gdt_stat.io_count_act;
    if (gdt_stat.io_count_act > gdt_stat.io_count_max)
        gdt_stat.io_count_max = gdt_stat.io_count_act;

    switch (ccb->ccb_h.func_code) {
      case XPT_SCSI_IO:
        TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
        ++gdt_stat.req_queue_act;
        if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
            gdt_stat.req_queue_max = gdt_stat.req_queue_act;
        gdt_next(gdt);
        break;
      case XPT_RESET_DEV:   /* Bus Device Reset the specified SCSI device */
      case XPT_ABORT:                       /* Abort the specified CCB */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_SET_TRAN_SETTINGS:
        ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_GET_TRAN_SETTINGS:
        /* Get default/user set transfer settings for the target */
          {
              struct        ccb_trans_settings *cts = &ccb->cts;
              struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
              struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;

              cts->protocol = PROTO_SCSI;
              cts->protocol_version = SCSI_REV_2;
              cts->transport = XPORT_SPI;
              cts->transport_version = 2;

              if (cts->type == CTS_TYPE_USER_SETTINGS) {
		  spi->flags = CTS_SPI_FLAGS_DISC_ENB;
                  scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
                  spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
                  spi->sync_period = 25; /* 10MHz */
                  if (spi->sync_period != 0)
                      spi->sync_offset = 15;

                  spi->valid = CTS_SPI_VALID_SYNC_RATE
                      | CTS_SPI_VALID_SYNC_OFFSET
                      | CTS_SPI_VALID_BUS_WIDTH
                      | CTS_SPI_VALID_DISC;
                  scsi->valid = CTS_SCSI_VALID_TQ;
                  ccb->ccb_h.status = CAM_REQ_CMP;
              } else {
                  ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
              }
              --gdt_stat.io_count_act;
              xpt_done(ccb);
              break;
          }
      case XPT_CALC_GEOMETRY:
          {
              struct ccb_calc_geometry *ccg;
              u_int32_t secs_per_cylinder;

              ccg = &ccb->ccg;
              ccg->heads = gdt->sc_hdr[target].hd_heads;
              ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
              secs_per_cylinder = ccg->heads * ccg->secs_per_track;
              ccg->cylinders = ccg->volume_size / secs_per_cylinder;
              ccb->ccb_h.status = CAM_REQ_CMP;
              --gdt_stat.io_count_act;
              xpt_done(ccb);
              break;
          }
      case XPT_RESET_BUS:           /* Reset the specified SCSI bus */
          {
              /* XXX Implement */
              ccb->ccb_h.status = CAM_REQ_CMP;
              --gdt_stat.io_count_act;
              xpt_done(ccb);
              break;
          }
      case XPT_TERM_IO:             /* Terminate the I/O process */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
      case XPT_PATH_INQ:            /* Path routing inquiry */
          {
              struct ccb_pathinq *cpi = &ccb->cpi;

              cpi->version_num = 1;
              cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
              cpi->hba_inquiry |= PI_WIDE_16;
              cpi->target_sprt = 1;
              cpi->hba_misc = 0;
              cpi->hba_eng_cnt = 0;
              if (bus == gdt->sc_virt_bus)
                  cpi->max_target = GDT_MAX_HDRIVES - 1;
              else if (gdt->sc_class & GDT_FC)
                  cpi->max_target = GDT_MAXID_FC - 1;
              else
                  cpi->max_target = GDT_MAXID - 1;
              cpi->max_lun = 7;
              cpi->unit_number = cam_sim_unit(sim);
              cpi->bus_id = bus;
              cpi->initiator_id =
                  (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
              cpi->base_transfer_speed = 3300;
              strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
              if (gdt->sc_vendor == INTEL_VENDOR_ID_IIR)
                  strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
              else
                  strncpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN);
              strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
              cpi->transport = XPORT_SPI;
              cpi->transport_version = 2;
              cpi->protocol = PROTO_SCSI;
              cpi->protocol_version = SCSI_REV_2;
              cpi->ccb_h.status = CAM_REQ_CMP;
              --gdt_stat.io_count_act;
              xpt_done(ccb);
              break;
          }
      default:
        GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
                                    gdt, ccb->ccb_h.func_code));
        ccb->ccb_h.status = CAM_REQ_INVALID;
        --gdt_stat.io_count_act;
        xpt_done(ccb);
        break;
    }
}

static void
iir_poll( struct cam_sim *sim )
{
    struct gdt_softc *gdt;

    gdt = (struct gdt_softc *)cam_sim_softc( sim );
    GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
    iir_intr_locked(gdt);
}

static void
iir_timeout(void *arg)
{
    /* arg is the gdt_ccb whose command timed out; currently only logged. */
    GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
}

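/*
 * shutdown_final handler: wait for pending commands, then issue a
 * GDT_FLUSH for every present host drive so the controller's caches are
 * written out before reboot or power-off.
 */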
static void
iir_shutdown( void *arg, int howto )
{
    struct gdt_softc *gdt;
    struct gdt_ccb *gccb;
    gdt_ucmd_t *ucmd;
    int i;

    gdt = (struct gdt_softc *)arg;
    GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));

    device_printf(gdt->sc_devnode,
	"Flushing all Host Drives. Please wait ...  ");

    /* allocate ucmd buffer */
    ucmd = malloc(sizeof(gdt_ucmd_t), M_GDTBUF, M_NOWAIT);
    if (ucmd == NULL) {
	printf("\n");
        device_printf(gdt->sc_devnode,
	    "iir_shutdown(): Cannot allocate resource\n");
        return;
    }
    bzero(ucmd, sizeof(gdt_ucmd_t));

    /* wait for pending IOs */
    mtx_lock(&gdt->sc_lock);
    gdt->sc_state = GDT_SHUTDOWN;
    if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
        mtx_sleep(gccb, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw", 100 * hz);

    /* flush */
    for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
        if (gdt->sc_hdr[i].hd_present) {
            ucmd->service = GDT_CACHESERVICE;
            ucmd->OpCode = GDT_FLUSH;
            ucmd->u.cache.DeviceNo = i;
            TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
            ucmd->complete_flag = FALSE;
            gdt_next(gdt);
            if (!ucmd->complete_flag)
                mtx_sleep(ucmd, &gdt->sc_lock, PCATCH | PRIBIO, "iirshw",
		    10 * hz);
        }
    }
    mtx_unlock(&gdt->sc_lock);

    free(ucmd, M_GDTBUF);       /* matches the M_GDTBUF allocation above */
    printf("Done.\n");
}

void
iir_intr(void *arg)
{
    struct gdt_softc *gdt = arg;

    mtx_lock(&gdt->sc_lock);
    iir_intr_locked(gdt);
    mtx_unlock(&gdt->sc_lock);
}

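/*
 * Interrupt handler body, also used by iir_poll() and gdt_wait(): read the
 * controller status, handle asynchronous and GDT_SPEZINDEX events, and
 * complete the command selected by the returned index (internal, IOCTL or
 * CAM request).  Returns the istatus value read from the controller.
 */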
1469int
1470iir_intr_locked(struct gdt_softc *gdt)
1471{
1472    struct gdt_intr_ctx ctx;
1473    struct gdt_ccb *gccb;
1474    gdt_ucmd_t *ucmd;
1475    u_int32_t cnt;
1476
1477    GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1478
1479    mtx_assert(&gdt->sc_lock, MA_OWNED);
1480
1481    /* If polling and we were not called from gdt_wait, just return */
1482    if ((gdt->sc_state & GDT_POLLING) &&
1483        !(gdt->sc_state & GDT_POLL_WAIT))
1484        return (0);
1485
1486    ctx.istatus = gdt->sc_get_status(gdt);
1487    if (ctx.istatus == 0x00) {
1488        gdt->sc_status = GDT_S_NO_STATUS;
1489        return (ctx.istatus);
1490    }
1491
1492    gdt->sc_intr(gdt, &ctx);
1493
1494    gdt->sc_status = ctx.cmd_status;
1495    gdt->sc_service = ctx.service;
1496    gdt->sc_info = ctx.info;
1497    gdt->sc_info2 = ctx.info2;
1498
1499    if (ctx.istatus == GDT_ASYNCINDEX) {
1500        gdt_async_event(gdt, ctx.service);
1501        return (ctx.istatus);
1502    }
1503    if (ctx.istatus == GDT_SPEZINDEX) {
1504        GDT_DPRINTF(GDT_D_INVALID,
1505                    ("%s: Service unknown or not initialized!\n",
1506		     device_get_nameunit(gdt->sc_devnode)));
1507        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1508        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1509        gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1510        return (ctx.istatus);
1511    }
1512
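    /*
     * Any other istatus identifies a completed command; the value appears to
     * be biased by two relative to the sc_gccbs array, the first two indices
     * being reserved for the async/special cases handled above.
     */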
1513    gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1514    ctx.service = gccb->gc_service;
1515
1516    switch (gccb->gc_flags) {
1517      case GDT_GCF_UNUSED:
1518        GDT_DPRINTF(GDT_D_INVALID, ("%s: Index (%d) to unused command!\n",
1519		    device_get_nameunit(gdt->sc_devnode), ctx.istatus));
1520        gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1521        gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1522        gdt->sc_dvr.eu.driver.index = ctx.istatus;
1523        gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1524        gdt_free_ccb(gdt, gccb);
1525	break;
1526
1527      case GDT_GCF_INTERNAL:
1528        break;
1529
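      /*
       * User (ioctl) command completed: on a busy status the gdt_ucmd is
       * requeued for another attempt; otherwise status/info and any scratch
       * data are copied back and the sleeping issuer is woken up.
       */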
1530      case GDT_GCF_IOCTL:
1531        ucmd = gccb->gc_ucmd;
1532        if (gdt->sc_status == GDT_S_BSY) {
1533            GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1534                                      gdt, gccb));
1535            TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1536        } else {
1537            ucmd->status = gdt->sc_status;
1538            ucmd->info = gdt->sc_info;
1539            ucmd->complete_flag = TRUE;
1540            if (ucmd->service == GDT_CACHESERVICE) {
1541                if (ucmd->OpCode == GDT_IOCTL) {
1542                    cnt = ucmd->u.ioctl.param_size;
1543                    if (cnt != 0)
1544                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
1545                } else {
1546                    cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1547                    if (cnt != 0)
1548                        bcopy(gccb->gc_scratch, ucmd->data, cnt);
1549                }
1550            } else {
1551                cnt = ucmd->u.raw.sdlen;
1552                if (cnt != 0)
1553                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
1554                if (ucmd->u.raw.sense_len != 0)
1555                    bcopy(gccb->gc_scratch, ucmd->data, cnt);
1556            }
1557            gdt_free_ccb(gdt, gccb);
1558            /* wakeup */
1559            wakeup(ucmd);
1560        }
1561        gdt_next(gdt);
1562        break;
1563
1564      default:
1565        gdt_free_ccb(gdt, gccb);
1566        gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1567        gdt_next(gdt);
1568        break;
1569    }
1570
1571    return (ctx.istatus);
1572}
1573
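/*
 * Handle an unsolicited (async) controller event.  Screen-service message
 * requests are answered by posting a GDT_READ of the message buffer; all
 * other events are recorded via gdt_store_event() and logged to the console.
 */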
1574int
1575gdt_async_event(struct gdt_softc *gdt, int service)
1576{
1577    struct gdt_ccb *gccb;
1578
1579    GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1580
1581    if (service == GDT_SCREENSERVICE) {
1582        if (gdt->sc_status == GDT_MSG_REQUEST) {
1583            while (gdt->sc_test_busy(gdt))
1584                DELAY(1);
1585            gccb = gdt_get_ccb(gdt);
1586            if (gccb == NULL) {
1587                device_printf(gdt->sc_devnode, "No free command index found\n");
1588                return (1);
1589            }
1590            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1591            gccb->gc_service = service;
1592            gccb->gc_flags = GDT_GCF_SCREEN;
1593            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1594                      gccb->gc_cmd_index);
1595            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
1596            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1597                      GDT_MSG_INV_HANDLE);
1598            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1599                      gccb->gc_scratch_busbase);
1600            gdt->sc_set_sema0(gdt);
1601            gdt->sc_cmd_off = 0;
1602            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1603                                      sizeof(u_int32_t));
1604            gdt->sc_cmd_cnt = 0;
1605            gdt->sc_copy_cmd(gdt, gccb);
1606            device_printf(gdt->sc_devnode, "[PCI %d/%d] ", gdt->sc_bus,
1607		gdt->sc_slot);
1608            gdt->sc_release_event(gdt);
1609        }
1610
1611    } else {
1612        if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1613            gdt->sc_dvr.size = 0;
1614            gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1615            gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1616            /* severity and event_string already set! */
1617        } else {
1618            gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1619            gdt->sc_dvr.eu.async.ionode   = gdt->sc_hanum;
1620            gdt->sc_dvr.eu.async.service = service;
1621            gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1622            gdt->sc_dvr.eu.async.info    = gdt->sc_info;
1623            *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord  = gdt->sc_info2;
1624        }
1625        gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1626        device_printf(gdt->sc_devnode, "%s\n", gdt->sc_dvr.event_string);
1627    }
1628
1629    return (0);
1630}
1631
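/*
 * Complete a regular (non-async) command.  Screen-service completions keep
 * the message dialogue going; cache- and raw-service completions translate
 * the controller status into CAM status (building sense data where needed)
 * and finish the CCB.  A return value of 2 means the controller was busy and
 * the CCB has been requeued.
 */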
1632int
1633gdt_sync_event(struct gdt_softc *gdt, int service,
1634               u_int8_t index, struct gdt_ccb *gccb)
1635{
1636    union ccb *ccb;
1637
1638    GDT_DPRINTF(GDT_D_INTR,
1639                ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1640
1641    ccb = gccb->gc_ccb;
1642
1643    if (service == GDT_SCREENSERVICE) {
1644        u_int32_t msg_len;
1645
1646        msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1647        if (msg_len &&
1648            !(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1649              gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1650            gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1651            printf("%s", &gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1652        }
1653
1654        if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1655            !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1656            while (gdt->sc_test_busy(gdt))
1657                DELAY(1);
1658            gccb = gdt_get_ccb(gdt);
1659            if (gccb == NULL) {
1660                device_printf(gdt->sc_devnode, "No free command index found\n");
1661                return (1);
1662            }
1663            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1664            gccb->gc_service = service;
1665            gccb->gc_flags = GDT_GCF_SCREEN;
1666            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1667                      gccb->gc_cmd_index);
1668            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_READ);
1669            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1670                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1671            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1672                      gccb->gc_scratch_busbase);
1673            gdt->sc_set_sema0(gdt);
1674            gdt->sc_cmd_off = 0;
1675            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1676                                      sizeof(u_int32_t));
1677            gdt->sc_cmd_cnt = 0;
1678            gdt->sc_copy_cmd(gdt, gccb);
1679            gdt->sc_release_event(gdt);
1680            return (0);
1681        }
1682
1683        if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1684            gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1685            /* default answers (getchar() not possible) */
1686            if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1687                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1688                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1689                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1690            } else {
1691                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1692                          gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1693                gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1694                gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1695                gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1696            }
1697            gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1698            gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1699            while (gdt->sc_test_busy(gdt))
1700                DELAY(1);
1701            gccb = gdt_get_ccb(gdt);
1702            if (gccb == NULL) {
1703                device_printf(gdt->sc_devnode, "No free command index found\n");
1704                return (1);
1705            }
1706            bzero(gccb->gc_cmd, GDT_CMD_SZ);
1707            gccb->gc_service = service;
1708            gccb->gc_flags = GDT_GCF_SCREEN;
1709            gdt_enc32(gccb->gc_cmd + GDT_CMD_COMMANDINDEX,
1710                      gccb->gc_cmd_index);
1711            gdt_enc16(gccb->gc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1712            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1713                      gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1714            gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1715                      gccb->gc_scratch_busbase);
1716            gdt->sc_set_sema0(gdt);
1717            gdt->sc_cmd_off = 0;
1718            gccb->gc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1719                                      sizeof(u_int32_t));
1720            gdt->sc_cmd_cnt = 0;
1721            gdt->sc_copy_cmd(gdt, gccb);
1722            gdt->sc_release_event(gdt);
1723            return (0);
1724        }
1725        printf("\n");
1726        return (0);
1727    } else {
1728	callout_stop(&gccb->gc_timeout);
1729        if (gdt->sc_status == GDT_S_BSY) {
1730            GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1731                                      gdt, gccb));
1732            TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1733            ++gdt_stat.req_queue_act;
1734            if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1735                gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1736            return (2);
1737        }
1738
1739        bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1740            (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1741            BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1742        bus_dmamap_unload(gdt->sc_buffer_dmat, gccb->gc_dmamap);
1743
1744        ccb->csio.resid = 0;
1745        if (gdt->sc_status == GDT_S_OK) {
1746            ccb->ccb_h.status |= CAM_REQ_CMP;
1747            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1748        } else {
1749            /* error */
1750            if (gccb->gc_service == GDT_CACHESERVICE) {
1751                struct scsi_sense_data *sense;
1752
1753                ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1754                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1755                ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1756                bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1757                sense = &ccb->csio.sense_data;
1758                scsi_set_sense_data(sense,
1759                                    /*sense_format*/ SSD_TYPE_NONE,
1760                                    /*current_error*/ 1,
1761                                    /*sense_key*/ SSD_KEY_NOT_READY,
1762                                    /*asc*/ 0x4,
1763                                    /*ascq*/ 0x01,
1764                                    SSD_ELEM_NONE);
1765
1766                gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1767                gdt->sc_dvr.eu.sync.ionode  = gdt->sc_hanum;
1768                gdt->sc_dvr.eu.sync.service = service;
1769                gdt->sc_dvr.eu.sync.status  = gdt->sc_status;
1770                gdt->sc_dvr.eu.sync.info    = gdt->sc_info;
1771                gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1772                if (gdt->sc_status >= 0x8000)
1773                    gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1774                else
1775                    gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1776            } else {
1777                /* raw service */
1778                if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1779                    ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1780                } else {
1781                    ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1782                    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1783                    ccb->csio.scsi_status = gdt->sc_info;
1784                    bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1785                          ccb->csio.sense_len);
1786                }
1787            }
1788        }
1789        --gdt_stat.io_count_act;
1790        xpt_done(ccb);
1791    }
1792    return (0);
1793}
1794
1795/* Controller event handling functions */
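/*
 * gdt_store_event() appends an event to the global ring buffer (ebuffer,
 * GDT_MAX_EVENTS entries).  A repeat of the newest entry only bumps its
 * same_count and timestamp; once the ring wraps around, the oldest entry is
 * dropped to make room.
 */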
1796void gdt_store_event(u_int16_t source, u_int16_t idx,
1797                             gdt_evt_data *evt)
1798{
1799    gdt_evt_str *e;
1800    struct timeval tv;
1801
1802    GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1803    if (source == 0)                        /* no source -> no event */
1804        return;
1805
1806    mtx_lock(&elock);
1807    if (ebuffer[elastidx].event_source == source &&
1808        ebuffer[elastidx].event_idx == idx &&
1809        ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1810          !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1811                  (char *)&evt->eu, evt->size)) ||
1812         (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1813          !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1814                  (char *)&evt->event_string)))) {
1815        e = &ebuffer[elastidx];
1816        getmicrotime(&tv);
1817        e->last_stamp = tv.tv_sec;
1818        ++e->same_count;
1819    } else {
1820        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
1821            ++elastidx;
1822            if (elastidx == GDT_MAX_EVENTS)
1823                elastidx = 0;
1824            if (elastidx == eoldidx) {              /* reached mark ? */
1825                ++eoldidx;
1826                if (eoldidx == GDT_MAX_EVENTS)
1827                    eoldidx = 0;
1828            }
1829        }
1830        e = &ebuffer[elastidx];
1831        e->event_source = source;
1832        e->event_idx = idx;
1833        getmicrotime(&tv);
1834        e->first_stamp = e->last_stamp = tv.tv_sec;
1835        e->same_count = 1;
1836        e->event_data = *evt;
1837        e->application = 0;
1838    }
1839    mtx_unlock(&elock);
1840}
1841
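/*
 * gdt_read_event() copies out a single event.  A handle of -1 starts at the
 * oldest entry; the return value is the handle of the next entry, or -1 when
 * the newest entry has just been delivered.
 */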
1842int gdt_read_event(int handle, gdt_evt_str *estr)
1843{
1844    gdt_evt_str *e;
1845    int eindex;
1846
1847    GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1848    mtx_lock(&elock);
1849    if (handle == -1)
1850        eindex = eoldidx;
1851    else
1852        eindex = handle;
1853    estr->event_source = 0;
1854
1855    if (eindex >= GDT_MAX_EVENTS) {
1856	mtx_unlock(&elock);
1857        return eindex;
1858    }
1859    e = &ebuffer[eindex];
1860    if (e->event_source != 0) {
1861        if (eindex != elastidx) {
1862            if (++eindex == GDT_MAX_EVENTS)
1863                eindex = 0;
1864        } else {
1865            eindex = -1;
1866        }
1867        memcpy(estr, e, sizeof(gdt_evt_str));
1868    }
1869    mtx_unlock(&elock);
1870    return eindex;
1871}
1872
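/*
 * gdt_readapp_event() delivers each stored event to a given application bit
 * at most once: scan from the oldest entry for one this application has not
 * yet seen, mark it, and copy it out.
 */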
1873void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1874{
1875    gdt_evt_str *e;
1876    int found = FALSE;
1877    int eindex;
1878
1879    GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
1880    mtx_lock(&elock);
1881    eindex = eoldidx;
1882    for (;;) {
1883        e = &ebuffer[eindex];
1884        if (e->event_source == 0)
1885            break;
1886        if ((e->application & application) == 0) {
1887            e->application |= application;
1888            found = TRUE;
1889            break;
1890        }
1891        if (eindex == elastidx)
1892            break;
1893        if (++eindex == GDT_MAX_EVENTS)
1894            eindex = 0;
1895    }
1896    if (found)
1897        memcpy(estr, e, sizeof(gdt_evt_str));
1898    else
1899        estr->event_source = 0;
1900    mtx_unlock(&elock);
1901}
1902
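/* Reset the event ring buffer to its empty state. */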
1903void gdt_clear_events(void)
1904{
1905    GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
1906
1907    mtx_lock(&elock);
1908    eoldidx = elastidx = 0;
1909    ebuffer[0].event_source = 0;
1910    mtx_unlock(&elock);
1911}
1912