iir.c (113350) → iir.c (114001)
1/* $FreeBSD: head/sys/dev/iir/iir.c 113350 2003-04-10 23:50:06Z mux $ */
1/* $FreeBSD: head/sys/dev/iir/iir.c 114001 2003-04-25 05:37:04Z scottl $ */
2/*
3 * Copyright (c) 2000-01 Intel Corporation
3 * Copyright (c) 2000-03 Intel Corporation
4 * All Rights Reserved
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification, immediately at the beginning of the file.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31/*
 32 * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
33 *
34 * Written by: Achim Leubner <achim.leubner@intel.com>
35 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
36 *
37 * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
38 * Mike Smith; Some driver source code.
39 * FreeBSD.ORG; Great O/S to work on and for.
40 *
41 * TODO:
42 */
43
44#ident "$Id: iir.c 1.2 2001/06/21 20:28:32 achim Exp $"
44#ident "$Id: iir.c 1.3 2003/03/21 16:28:32 achim Exp $"
45
46#define _IIR_C_
47
48/* #include "opt_iir.h" */
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/endian.h>
52#include <sys/eventhandler.h>
53#include <sys/malloc.h>
54#include <sys/kernel.h>
55#include <sys/bus.h>
56
57#include <machine/bus_memio.h>
58#include <machine/bus_pio.h>
59#include <machine/bus.h>
60#include <machine/clock.h>
61#include <machine/stdarg.h>
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_sim.h>
66#include <cam/cam_xpt_sim.h>
67#include <cam/cam_debug.h>
68#include <cam/scsi/scsi_all.h>
69#include <cam/scsi/scsi_message.h>
70
71#include <vm/vm.h>
72#include <vm/pmap.h>
73
74#include <dev/iir/iir.h>
75
76struct gdt_softc *gdt_wait_gdt;
77int gdt_wait_index;
78
79#ifdef GDT_DEBUG
80int gdt_debug = GDT_DEBUG;
81#ifdef __SERIAL__
82#define MAX_SERBUF 160
83static void ser_init(void);
84static void ser_puts(char *str);
85static void ser_putc(int c);
86static char strbuf[MAX_SERBUF+1];
87#ifdef __COM2__
88#define COM_BASE 0x2f8
89#else
90#define COM_BASE 0x3f8
91#endif
92static void ser_init()
93{
94 unsigned port=COM_BASE;
95
96 outb(port+3, 0x80);
97 outb(port+1, 0);
98 /* 19200 Baud, if 9600: outb(12,port) */
99 outb(port, 6);
100 outb(port+3, 3);
101 outb(port+1, 0);
102}
103
104static void ser_puts(char *str)
105{
106 char *ptr;
107
108 ser_init();
109 for (ptr=str;*ptr;++ptr)
110 ser_putc((int)(*ptr));
111}
112
113static void ser_putc(int c)
114{
115 unsigned port=COM_BASE;
116
117 while ((inb(port+5) & 0x20)==0);
118 outb(port, c);
119 if (c==0x0a)
120 {
121 while ((inb(port+5) & 0x20)==0);
122 outb(port, 0x0d);
123 }
124}
125
126int ser_printf(const char *fmt, ...)
127{
128 va_list args;
129 int i;
130
131 va_start(args,fmt);
132 i = vsprintf(strbuf,fmt,args);
133 ser_puts(strbuf);
134 va_end(args);
135 return i;
136}
137#endif
138#endif
139
140/* The linked list of softc structures */
141struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
142/* controller cnt. */
143int gdt_cnt = 0;
144/* event buffer */
145static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
146static int elastidx, eoldidx;
147/* statistics */
148gdt_statist_t gdt_stat;
149
150/* Definitions for our use of the SIM private CCB area */
151#define ccb_sim_ptr spriv_ptr0
152#define ccb_priority spriv_field1
153
154static void iir_action(struct cam_sim *sim, union ccb *ccb);
155static void iir_poll(struct cam_sim *sim);
156static void iir_shutdown(void *arg, int howto);
157static void iir_timeout(void *arg);
158static void iir_watchdog(void *arg);
159
160static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
161 int *secs);
162static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
163 u_int8_t service, u_int16_t opcode,
164 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
165static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
166 int timeout);
167
168static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
169static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
170 struct gdt_ccb *gccb);
171
172static int gdt_sync_event(struct gdt_softc *gdt, int service,
173 u_int8_t index, struct gdt_ccb *gccb);
174static int gdt_async_event(struct gdt_softc *gdt, int service);
175static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
176 union ccb *ccb, int *lock);
177static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
178 union ccb *ccb, int *lock);
179static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
180 gdt_ucmd_t *ucmd, int *lock);
181static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
182
183static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
184 int nseg, int error);
185static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
186 int nseg, int error);
187
188int
189iir_init(struct gdt_softc *gdt)
190{
191 u_int16_t cdev_cnt;
192 int i, id, drv_cyls, drv_hds, drv_secs;
193 struct gdt_ccb *gccb;
194
195 GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
196
197 gdt->sc_state = GDT_POLLING;
198 gdt_clear_events();
199 bzero(&gdt_stat, sizeof(gdt_statist_t));
200
201 SLIST_INIT(&gdt->sc_free_gccb);
202 SLIST_INIT(&gdt->sc_pending_gccb);
203 TAILQ_INIT(&gdt->sc_ccb_queue);
204 TAILQ_INIT(&gdt->sc_ucmd_queue);
205 TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
206
207 /* DMA tag for mapping buffers into device visible space. */
208 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
209 /*lowaddr*/BUS_SPACE_MAXADDR,
209 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
210 /*highaddr*/BUS_SPACE_MAXADDR,
211 /*filter*/NULL, /*filterarg*/NULL,
212 /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
213 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
214 /*flags*/BUS_DMA_ALLOCNOW,
215 &gdt->sc_buffer_dmat) != 0) {
216 printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
217 gdt->sc_hanum);
218 return (1);
219 }
220 gdt->sc_init_level++;
221
222 /* DMA tag for our ccb structures */
223 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
224 /*lowaddr*/BUS_SPACE_MAXADDR,
223 if (bus_dma_tag_create(gdt->sc_parent_dmat,
224 /*alignment*/1,
225 /*boundary*/0,
226 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
225 /*highaddr*/BUS_SPACE_MAXADDR,
227 /*highaddr*/BUS_SPACE_MAXADDR,
226 /*filter*/NULL, /*filterarg*/NULL,
227 GDT_MAXCMDS * sizeof(struct gdt_ccb),
228 /*filter*/NULL,
229 /*filterarg*/NULL,
230 GDT_MAXCMDS * sizeof(struct gdt_ccb), /* maxsize */
228 /*nsegments*/1,
229 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
230 /*flags*/0, &gdt->sc_gccb_dmat) != 0) {
231 printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
232 gdt->sc_hanum);
233 return (1);
234 }
235 gdt->sc_init_level++;
236
237 /* Allocation for our ccbs */
238 if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
239 BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
240 printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
241 gdt->sc_hanum);
242 return (1);
243 }
244 gdt->sc_init_level++;
245
246 /* And permanently map them */
247 bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
248 gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
249 gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
250 gdt->sc_init_level++;
251
252 /* Clear them out. */
253 bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
254
255 /* Initialize the ccbs */
256 for (i = GDT_MAXCMDS-1; i >= 0; i--) {
257 gdt->sc_gccbs[i].gc_cmd_index = i + 2;
258 gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
259 gdt->sc_gccbs[i].gc_map_flag = FALSE;
260 if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
261 &gdt->sc_gccbs[i].gc_dmamap) != 0)
262 return(1);
263 gdt->sc_gccbs[i].gc_map_flag = TRUE;
264 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
265 }
266 gdt->sc_init_level++;
267
268 /* create the control device */
269 gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
270
271 /* allocate ccb for gdt_internal_cmd() */
272 gccb = gdt_get_ccb(gdt);
273 if (gccb == NULL) {
274 printf("iir%d: No free command index found\n",
275 gdt->sc_hanum);
276 return (1);
277 }
278
279 if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
280 0, 0, 0)) {
281 printf("iir%d: Screen service initialization error %d\n",
282 gdt->sc_hanum, gdt->sc_status);
283 gdt_free_ccb(gdt, gccb);
284 return (1);
285 }
286
287 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
288 GDT_LINUX_OS, 0, 0)) {
289 printf("iir%d: Cache service initialization error %d\n",
290 gdt->sc_hanum, gdt->sc_status);
291 gdt_free_ccb(gdt, gccb);
292 return (1);
293 }
294 gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
295 0, 0, 0);
296
297 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
298 0xffff, 1, 0)) {
299 printf("iir%d: Cache service mount error %d\n",
300 gdt->sc_hanum, gdt->sc_status);
301 gdt_free_ccb(gdt, gccb);
302 return (1);
303 }
304
305 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
306 GDT_LINUX_OS, 0, 0)) {
307 printf("iir%d: Cache service post-mount initialization error %d\n",
308 gdt->sc_hanum, gdt->sc_status);
309 gdt_free_ccb(gdt, gccb);
310 return (1);
311 }
312 cdev_cnt = (u_int16_t)gdt->sc_info;
313 gdt->sc_fw_vers = gdt->sc_service;
314
315 /* Detect number of buses */
316 gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
317 gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
318 gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
319 gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
320 gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
321 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
322 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
323 GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
324 gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
325 for (i = 0; i < gdt->sc_bus_cnt; i++) {
326 id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
327 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
328 gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
329 }
330 } else {
331 /* New method failed, use fallback. */
332 for (i = 0; i < GDT_MAXBUS; i++) {
333 gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
334 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
335 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
336 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
337 GDT_GETCH_SZ)) {
338 if (i == 0) {
339 printf("iir%d: Cannot get channel count, "
340 "error %d\n", gdt->sc_hanum, gdt->sc_status);
341 gdt_free_ccb(gdt, gccb);
342 return (1);
343 }
344 break;
345 }
346 gdt->sc_bus_id[i] =
347 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
348 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
349 }
350 gdt->sc_bus_cnt = i;
351 }
352 /* add one "virtual" channel for the host drives */
353 gdt->sc_virt_bus = gdt->sc_bus_cnt;
354 gdt->sc_bus_cnt++;
355
356 if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
357 0, 0, 0)) {
358 printf("iir%d: Raw service initialization error %d\n",
359 gdt->sc_hanum, gdt->sc_status);
360 gdt_free_ccb(gdt, gccb);
361 return (1);
362 }
363
364 /* Set/get features raw service (scatter/gather) */
365 gdt->sc_raw_feat = 0;
366 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
367 GDT_SCATTER_GATHER, 0, 0)) {
368 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
369 0, 0, 0)) {
370 gdt->sc_raw_feat = gdt->sc_info;
371 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
372 panic("iir%d: Scatter/Gather Raw Service "
373 "required but not supported!\n", gdt->sc_hanum);
374 gdt_free_ccb(gdt, gccb);
375 return (1);
376 }
377 }
378 }
379
380 /* Set/get features cache service (scatter/gather) */
381 gdt->sc_cache_feat = 0;
382 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
383 0, GDT_SCATTER_GATHER, 0)) {
384 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
385 0, 0, 0)) {
386 gdt->sc_cache_feat = gdt->sc_info;
387 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
388 panic("iir%d: Scatter/Gather Cache Service "
389 "required but not supported!\n", gdt->sc_hanum);
390 gdt_free_ccb(gdt, gccb);
391 return (1);
392 }
393 }
394 }
395
399 /* OEM */
400 gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
401 gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
402 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
403 GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
404 sizeof(gdt_oem_str_record_t))) {
405 strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
406 gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
407 gdt->oem_name[7]='\0';
408 } else {
409 /* Old method, based on PCI ID */
410 if (gdt->sc_vendor == INTEL_VENDOR_ID)
411 strcpy(gdt->oem_name,"Intel ");
412 else
413 strcpy(gdt->oem_name,"ICP ");
414 }
415
396 /* Scan for cache devices */
397 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
398 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
399 i, 0, 0)) {
400 gdt->sc_hdr[i].hd_present = 1;
401 gdt->sc_hdr[i].hd_size = gdt->sc_info;
402
403 /*
404 * Evaluate mapping (sectors per head, heads per cyl)
405 */
406 gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
407 if (gdt->sc_info2 == 0)
408 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
409 &drv_cyls, &drv_hds, &drv_secs);
410 else {
411 drv_hds = gdt->sc_info2 & 0xff;
412 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
413 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
414 drv_secs;
415 }
416 gdt->sc_hdr[i].hd_heads = drv_hds;
417 gdt->sc_hdr[i].hd_secs = drv_secs;
418 /* Round the size */
419 gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
420
421 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
422 GDT_DEVTYPE, i, 0, 0))
423 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
424 }
425 }
426
427 GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
428 gdt->sc_dpmembase,
429 gdt->sc_bus_cnt, cdev_cnt,
430 cdev_cnt == 1 ? "" : "s"));
431 gdt_free_ccb(gdt, gccb);
432
433 gdt_cnt++;
434 return (0);
435}
436
437void
438iir_free(struct gdt_softc *gdt)
439{
440 int i;
441
442 GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
443
444 switch (gdt->sc_init_level) {
445 default:
446 gdt_destroy_dev(gdt->sc_dev);
447 case 5:
448 for (i = GDT_MAXCMDS-1; i >= 0; i--)
449 if (gdt->sc_gccbs[i].gc_map_flag)
450 bus_dmamap_destroy(gdt->sc_buffer_dmat,
451 gdt->sc_gccbs[i].gc_dmamap);
452 bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
453 case 4:
454 bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
455 case 3:
456 bus_dma_tag_destroy(gdt->sc_gccb_dmat);
457 case 2:
458 bus_dma_tag_destroy(gdt->sc_buffer_dmat);
459 case 1:
460 bus_dma_tag_destroy(gdt->sc_parent_dmat);
461 case 0:
462 break;
463 }
464 TAILQ_REMOVE(&gdt_softcs, gdt, links);
465}
466
467void
468iir_attach(struct gdt_softc *gdt)
469{
470 struct cam_devq *devq;
471 int i;
472
473 GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
474
475 /*
476 * Create the device queue for our SIM.
477 */
478 devq = cam_simq_alloc(GDT_MAXCMDS);
479 if (devq == NULL)
480 return;
481
482 for (i = 0; i < gdt->sc_bus_cnt; i++) {
483 /*
484 * Construct our SIM entry
485 */
486 gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
487 gdt, gdt->sc_hanum, /*untagged*/2,
488 /*tagged*/GDT_MAXCMDS, devq);
489 if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
490 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
491 break;
492 }
493
494 if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
495 cam_sim_path(gdt->sims[i]),
496 CAM_TARGET_WILDCARD,
497 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
498 xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
499 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
500 break;
501 }
502 }
503 if (i > 0)
504 EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
505 gdt, SHUTDOWN_PRI_DEFAULT);
506 /* iir_watchdog(gdt); */
507 gdt->sc_state = GDT_NORMAL;
508}
509
510static void
511gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
512{
513 *cyls = size / GDT_HEADS / GDT_SECS;
514 if (*cyls < GDT_MAXCYLS) {
515 *heads = GDT_HEADS;
516 *secs = GDT_SECS;
517 } else {
518 /* Too high for 64 * 32 */
519 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
520 if (*cyls < GDT_MAXCYLS) {
521 *heads = GDT_MEDHEADS;
522 *secs = GDT_MEDSECS;
523 } else {
524 /* Too high for 127 * 63 */
525 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
526 *heads = GDT_BIGHEADS;
527 *secs = GDT_BIGSECS;
528 }
529 }
530}
531
532static int
533gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
534 int timeout)
535{
536 int rv = 0;
537
538 GDT_DPRINTF(GDT_D_INIT,
539 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
540
541 gdt->sc_state |= GDT_POLL_WAIT;
542 do {
543 iir_intr(gdt);
544 if (gdt == gdt_wait_gdt &&
545 gccb->gc_cmd_index == gdt_wait_index) {
546 rv = 1;
547 break;
548 }
549 DELAY(1);
550 } while (--timeout);
551 gdt->sc_state &= ~GDT_POLL_WAIT;
552
553 while (gdt->sc_test_busy(gdt))
554 DELAY(1); /* XXX correct? */
555
556 return (rv);
557}
558
559static int
560gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
561 u_int8_t service, u_int16_t opcode,
562 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
563{
564 int retries;
565
566 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
567 gdt, service, opcode, arg1, arg2, arg3));
568
569 bzero(gdt->sc_cmd, GDT_CMD_SZ);
570
571 for (retries = GDT_RETRIES; ; ) {
572 gccb->gc_service = service;
573 gccb->gc_flags = GDT_GCF_INTERNAL;
574
575 gdt->sc_set_sema0(gdt);
576 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
577 gccb->gc_cmd_index);
578 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
579
580 switch (service) {
581 case GDT_CACHESERVICE:
582 if (opcode == GDT_IOCTL) {
583 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
584 GDT_IOCTL_SUBFUNC, arg1);
585 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
586 GDT_IOCTL_CHANNEL, arg2);
587 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
588 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
589 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
590 gdt_ccb_vtop(gdt, gccb) +
591 offsetof(struct gdt_ccb, gc_scratch[0]));
592 } else {
593 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
594 GDT_CACHE_DEVICENO, (u_int16_t)arg1);
595 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
596 GDT_CACHE_BLOCKNO, arg2);
597 }
598 break;
599
600 case GDT_SCSIRAWSERVICE:
601 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
602 GDT_RAW_DIRECTION, arg1);
603 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
604 (u_int8_t)arg2;
605 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
606 (u_int8_t)arg3;
607 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
608 (u_int8_t)(arg3 >> 8);
609 }
610
611 gdt->sc_cmd_len = GDT_CMD_SZ;
612 gdt->sc_cmd_off = 0;
613 gdt->sc_cmd_cnt = 0;
614 gdt->sc_copy_cmd(gdt, gccb);
615 gdt->sc_release_event(gdt);
616 DELAY(20);
617 if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
618 return (0);
619 if (gdt->sc_status != GDT_S_BSY || --retries == 0)
620 break;
621 DELAY(1);
622 }
623 return (gdt->sc_status == GDT_S_OK);
624}
625
626static struct gdt_ccb *
627gdt_get_ccb(struct gdt_softc *gdt)
628{
629 struct gdt_ccb *gccb;
630 int lock;
631
632 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
633
634 lock = splcam();
635 gccb = SLIST_FIRST(&gdt->sc_free_gccb);
636 if (gccb != NULL) {
637 SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
638 SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
639 ++gdt_stat.cmd_index_act;
640 if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
641 gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
642 }
643 splx(lock);
644 return (gccb);
645}
646
647void
648gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
649{
650 int lock;
651
652 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
653
654 lock = splcam();
655 gccb->gc_flags = GDT_GCF_UNUSED;
656 SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
657 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
658 --gdt_stat.cmd_index_act;
659 splx(lock);
660 if (gdt->sc_state & GDT_SHUTDOWN)
661 wakeup(gccb);
662}
663
664static u_int32_t
665gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
666{
667 return (gdt->sc_gccb_busbase
668 + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
669}
670
671void
672gdt_next(struct gdt_softc *gdt)
673{
674 int lock;
675 union ccb *ccb;
676 gdt_ucmd_t *ucmd;
677 struct cam_sim *sim;
678 int bus, target, lun;
679 int next_cmd;
680
681 struct ccb_scsiio *csio;
682 struct ccb_hdr *ccbh;
683 struct gdt_ccb *gccb = NULL;
684 u_int8_t cmd;
685
686 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
687
688 lock = splcam();
689 if (gdt->sc_test_busy(gdt)) {
690 if (!(gdt->sc_state & GDT_POLLING)) {
691 splx(lock);
692 return;
693 }
694 while (gdt->sc_test_busy(gdt))
695 DELAY(1);
696 }
697
698 gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
699 next_cmd = TRUE;
700 for (;;) {
701 /* I/Os in queue? controller ready? */
702 if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
703 !TAILQ_FIRST(&gdt->sc_ccb_queue))
704 break;
705
706 /* 1.: I/Os without ccb (IOCTLs) */
707 ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
708 if (ucmd != NULL) {
709 TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
710 if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
711 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
712 break;
713 }
714 break;
 715 /* if multiple commands were allowed: if (!gdt_polling) continue; */
716 }
717
718 /* 2.: I/Os with ccb */
719 ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
 720 /* always != NULL here, since tested above */
721 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
722 bus = cam_sim_bus(sim);
723 target = ccb->ccb_h.target_id;
724 lun = ccb->ccb_h.target_lun;
725
726 TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
727 --gdt_stat.req_queue_act;
728 /* ccb->ccb_h.func_code is XPT_SCSI_IO */
729 GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
730 ccb->ccb_h.flags));
731 csio = &ccb->csio;
732 ccbh = &ccb->ccb_h;
733 cmd = csio->cdb_io.cdb_bytes[0];
734 /* Max CDB length is 12 bytes */
735 if (csio->cdb_len > 12) {
736 ccbh->status = CAM_REQ_INVALID;
737 --gdt_stat.io_count_act;
738 xpt_done(ccb);
739 } else if (bus != gdt->sc_virt_bus) {
740 /* raw service command */
741 if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
742 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
743 sim_links.tqe);
744 ++gdt_stat.req_queue_act;
745 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
746 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
747 next_cmd = FALSE;
748 }
749 } else if (target >= GDT_MAX_HDRIVES ||
750 !gdt->sc_hdr[target].hd_present || lun != 0) {
751 ccbh->status = CAM_SEL_TIMEOUT;
771 ccbh->status = CAM_DEV_NOT_THERE;
752 --gdt_stat.io_count_act;
753 xpt_done(ccb);
754 } else {
755 /* cache service command */
756 if (cmd == READ_6 || cmd == WRITE_6 ||
757 cmd == READ_10 || cmd == WRITE_10) {
758 if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
759 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
760 sim_links.tqe);
761 ++gdt_stat.req_queue_act;
762 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
763 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
764 next_cmd = FALSE;
765 }
766 } else {
767 splx(lock);
768 gdt_internal_cache_cmd(gdt, ccb);
769 lock = splcam();
770 }
771 }
772 if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
773 break;
774 }
775 if (gdt->sc_cmd_cnt > 0)
776 gdt->sc_release_event(gdt);
777
778 splx(lock);
779
780 if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
781 gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
782 }
783}
784
785static struct gdt_ccb *
786gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
787{
788 struct gdt_ccb *gccb;
789 struct cam_sim *sim;
790
791 GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
792
793 if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
794 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
795 gdt->sc_ic_all_size) {
796 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
797 gdt->sc_hanum));
798 return (NULL);
799 }
800
801 bzero(gdt->sc_cmd, GDT_CMD_SZ);
802
803 gccb = gdt_get_ccb(gdt);
804 if (gccb == NULL) {
805 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
806 gdt->sc_hanum));
807 return (gccb);
808 }
809 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
810 gccb->gc_ccb = ccb;
811 gccb->gc_service = GDT_SCSIRAWSERVICE;
812 gccb->gc_flags = GDT_GCF_SCSI;
813
814 if (gdt->sc_cmd_cnt == 0)
815 gdt->sc_set_sema0(gdt);
816 splx(*lock);
817 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
818 gccb->gc_cmd_index);
819 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
820
821 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
822 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
823 GDT_DATA_IN : GDT_DATA_OUT);
824 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
825 ccb->csio.dxfer_len);
826 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
827 ccb->csio.cdb_len);
828 bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
829 ccb->csio.cdb_len);
830 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
831 ccb->ccb_h.target_id;
832 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
833 ccb->ccb_h.target_lun;
834 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
835 cam_sim_bus(sim);
836 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
837 sizeof(struct scsi_sense_data));
838 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
839 gdt_ccb_vtop(gdt, gccb) +
840 offsetof(struct gdt_ccb, gc_scratch[0]));
841
842 /*
843 * If we have any data to send with this command,
844 * map it into bus space.
845 */
846 /* Only use S/G if there is a transfer */
847 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
848 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
849 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
850 int s;
851 int error;
852
 853 /* unlock splcam() beforehand ??? */
854 s = splsoftvm();
855 error =
856 bus_dmamap_load(gdt->sc_buffer_dmat,
857 gccb->gc_dmamap,
858 ccb->csio.data_ptr,
859 ccb->csio.dxfer_len,
860 gdtexecuteccb,
861 gccb, /*flags*/0);
862 if (error == EINPROGRESS) {
863 xpt_freeze_simq(sim, 1);
864 gccb->gc_state |= CAM_RELEASE_SIMQ;
865 }
866 splx(s);
867 } else {
868 struct bus_dma_segment seg;
869
870 /* Pointer to physical buffer */
871 seg.ds_addr =
872 (bus_addr_t)ccb->csio.data_ptr;
873 seg.ds_len = ccb->csio.dxfer_len;
874 gdtexecuteccb(gccb, &seg, 1, 0);
875 }
876 } else {
877 struct bus_dma_segment *segs;
878
879 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
880 panic("iir%d: iir_action - Physical "
881 "segment pointers unsupported", gdt->sc_hanum);
882
883 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
884 panic("iir%d: iir_action - Virtual "
885 "segment addresses unsupported", gdt->sc_hanum);
886
887 /* Just use the segments provided */
888 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
889 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
890 }
891 } else {
892 gdtexecuteccb(gccb, NULL, 0, 0);
893 }
894
895 *lock = splcam();
896 return (gccb);
897}
898
899static struct gdt_ccb *
900gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
901{
902 struct gdt_ccb *gccb;
903 struct cam_sim *sim;
904 u_int8_t *cmdp;
905 u_int16_t opcode;
906 u_int32_t blockno, blockcnt;
907
908 GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
909
910 if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
911 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
912 gdt->sc_ic_all_size) {
913 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
914 gdt->sc_hanum));
915 return (NULL);
916 }
917
918 bzero(gdt->sc_cmd, GDT_CMD_SZ);
919
920 gccb = gdt_get_ccb(gdt);
921 if (gccb == NULL) {
922 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
923 gdt->sc_hanum));
924 return (gccb);
925 }
926 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
927 gccb->gc_ccb = ccb;
928 gccb->gc_service = GDT_CACHESERVICE;
929 gccb->gc_flags = GDT_GCF_SCSI;
930
931 if (gdt->sc_cmd_cnt == 0)
932 gdt->sc_set_sema0(gdt);
933 splx(*lock);
934 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
935 gccb->gc_cmd_index);
936 cmdp = ccb->csio.cdb_io.cdb_bytes;
937 opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
938 if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
939 opcode = GDT_WRITE_THR;
940 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
941
942 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
943 ccb->ccb_h.target_id);
944 if (ccb->csio.cdb_len == 6) {
945 struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
946 blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
947 blockcnt = rw->length ? rw->length : 0x100;
948 } else {
949 struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
950 blockno = scsi_4btoul(rw->addr);
951 blockcnt = scsi_2btoul(rw->length);
952 }
953 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
954 blockno);
955 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
956 blockcnt);
957
958 /*
959 * If we have any data to send with this command,
960 * map it into bus space.
961 */
962 /* Only use S/G if there is a transfer */
963 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
964 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
965 int s;
966 int error;
967
 968 /* unlock splcam() beforehand ??? */
969 s = splsoftvm();
970 error =
971 bus_dmamap_load(gdt->sc_buffer_dmat,
972 gccb->gc_dmamap,
973 ccb->csio.data_ptr,
974 ccb->csio.dxfer_len,
975 gdtexecuteccb,
976 gccb, /*flags*/0);
977 if (error == EINPROGRESS) {
978 xpt_freeze_simq(sim, 1);
979 gccb->gc_state |= CAM_RELEASE_SIMQ;
980 }
981 splx(s);
982 } else {
983 struct bus_dma_segment seg;
984
985 /* Pointer to physical buffer */
986 seg.ds_addr =
987 (bus_addr_t)ccb->csio.data_ptr;
988 seg.ds_len = ccb->csio.dxfer_len;
989 gdtexecuteccb(gccb, &seg, 1, 0);
990 }
991 } else {
992 struct bus_dma_segment *segs;
993
994 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
995 panic("iir%d: iir_action - Physical "
996 "segment pointers unsupported", gdt->sc_hanum);
997
998 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
999 panic("iir%d: iir_action - Virtual "
1000 "segment addresses unsupported", gdt->sc_hanum);
1001
1002 /* Just use the segments provided */
1003 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1004 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
1005 }
1006
1007 *lock = splcam();
1008 return (gccb);
1009}
1010
1011static struct gdt_ccb *
1012gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
1013{
1014 struct gdt_ccb *gccb;
1015 u_int32_t cnt;
1016
1017 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1018
1019 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1020
1021 gccb = gdt_get_ccb(gdt);
1022 if (gccb == NULL) {
1023 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1024 gdt->sc_hanum));
1025 return (gccb);
1026 }
1027 gccb->gc_ucmd = ucmd;
1028 gccb->gc_service = ucmd->service;
1029 gccb->gc_flags = GDT_GCF_IOCTL;
1030
1031 /* check DPMEM space, copy data buffer from user space */
1032 if (ucmd->service == GDT_CACHESERVICE) {
1033 if (ucmd->OpCode == GDT_IOCTL) {
1034 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1035 sizeof(u_int32_t));
1036 cnt = ucmd->u.ioctl.param_size;
1037 if (cnt > GDT_SCRATCH_SZ) {
1038 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1039 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1040 gdt_free_ccb(gdt, gccb);
1041 return (NULL);
1042 }
1043 } else {
1044 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1045 GDT_SG_SZ, sizeof(u_int32_t));
1046 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1047 if (cnt > GDT_SCRATCH_SZ) {
1048 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1049 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1050 gdt_free_ccb(gdt, gccb);
1051 return (NULL);
1052 }
1053 }
1054 } else {
1055 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1056 GDT_SG_SZ, sizeof(u_int32_t));
1057 cnt = ucmd->u.raw.sdlen;
1058 if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1059 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1060 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1061 gdt_free_ccb(gdt, gccb);
1062 return (NULL);
1063 }
1064 }
1065 if (cnt != 0)
1066 bcopy(ucmd->data, gccb->gc_scratch, cnt);
1067
1068 if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1069 gdt->sc_ic_all_size) {
1070 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1071 gdt->sc_hanum));
1072 gdt_free_ccb(gdt, gccb);
1073 return (NULL);
1074 }
1075
1076 if (gdt->sc_cmd_cnt == 0)
1077 gdt->sc_set_sema0(gdt);
1078 splx(*lock);
1079
1080 /* fill cmd structure */
1081 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1082 gccb->gc_cmd_index);
1083 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1084 ucmd->OpCode);
1085
1086 if (ucmd->service == GDT_CACHESERVICE) {
1087 if (ucmd->OpCode == GDT_IOCTL) {
1088 /* IOCTL */
1089 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1090 ucmd->u.ioctl.param_size);
1091 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1092 ucmd->u.ioctl.subfunc);
1093 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1094 ucmd->u.ioctl.channel);
1095 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1096 gdt_ccb_vtop(gdt, gccb) +
1097 offsetof(struct gdt_ccb, gc_scratch[0]));
1098 } else {
1099 /* cache service command */
1100 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1101 ucmd->u.cache.DeviceNo);
1102 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1103 ucmd->u.cache.BlockNo);
1104 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1105 ucmd->u.cache.BlockCnt);
1106 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1107 0xffffffffUL);
1108 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1109 1);
1110 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1111 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1112 offsetof(struct gdt_ccb, gc_scratch[0]));
1113 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1114 GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1115 }
1116 } else {
1117 /* raw service command */
1118 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1119 ucmd->u.raw.direction);
1120 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1121 0xffffffffUL);
1122 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1123 ucmd->u.raw.sdlen);
1124 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1125 ucmd->u.raw.clen);
1126 bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1127 12);
1128 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1129 ucmd->u.raw.target;
1130 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1131 ucmd->u.raw.lun;
1132 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1133 ucmd->u.raw.bus;
1134 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1135 ucmd->u.raw.sense_len);
1136 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1137 gdt_ccb_vtop(gdt, gccb) +
1138 offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1139 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1140 1);
1141 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1142 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1143 offsetof(struct gdt_ccb, gc_scratch[0]));
1144 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1145 GDT_SG_LEN, ucmd->u.raw.sdlen);
1146 }
1147
1148 *lock = splcam();
1149 gdt_stat.sg_count_act = 1;
1150 gdt->sc_copy_cmd(gdt, gccb);
1151 return (gccb);
1152}
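/*
 * Illustrative aside: the gdt_enc16()/gdt_enc32() helpers used throughout
 * this file pack values byte-by-byte into the shared command buffer.  Their
 * real definitions live elsewhere in the driver; the standalone sketch below
 * only shows the little-endian packing they are assumed to perform for this
 * Intel-based controller (the names enc16_le/enc32_le are invented here).
 */
#include <stdint.h>

static void
enc16_le(uint8_t *addr, uint16_t value)
{
    addr[0] = (uint8_t)(value & 0xff);
    addr[1] = (uint8_t)(value >> 8);
}

static void
enc32_le(uint8_t *addr, uint32_t value)
{
    addr[0] = (uint8_t)(value & 0xff);
    addr[1] = (uint8_t)((value >> 8) & 0xff);
    addr[2] = (uint8_t)((value >> 16) & 0xff);
    addr[3] = (uint8_t)(value >> 24);
}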
1153
1154static void
1155gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1156{
1157 int t;
1158
1159 t = ccb->ccb_h.target_id;
1160 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1161 gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1162
1163 switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1164 case TEST_UNIT_READY:
1165 case START_STOP:
1166 break;
1167 case REQUEST_SENSE:
1168 GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1169 break;
1170 case INQUIRY:
1171 {
1172 struct scsi_inquiry_data *inq;
1173
1174 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1175 bzero(inq, sizeof(struct scsi_inquiry_data));
1176 inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1177 T_CDROM : T_DIRECT;
1178 inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1179 inq->version = SCSI_REV_2;
1180 inq->response_format = 2;
1181 inq->additional_length = 32;
1182 inq->flags = SID_CmdQue | SID_Sync;
772 --gdt_stat.io_count_act;
773 xpt_done(ccb);
774 } else {
775 /* cache service command */
776 if (cmd == READ_6 || cmd == WRITE_6 ||
777 cmd == READ_10 || cmd == WRITE_10) {
778 if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
779 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
780 sim_links.tqe);
781 ++gdt_stat.req_queue_act;
782 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
783 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
784 next_cmd = FALSE;
785 }
786 } else {
787 splx(lock);
788 gdt_internal_cache_cmd(gdt, ccb);
789 lock = splcam();
790 }
791 }
792 if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
793 break;
794 }
795 if (gdt->sc_cmd_cnt > 0)
796 gdt->sc_release_event(gdt);
797
798 splx(lock);
799
800 if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
801 gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
802 }
803}
804
805static struct gdt_ccb *
806gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
807{
808 struct gdt_ccb *gccb;
809 struct cam_sim *sim;
810
811 GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
812
813 if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
814 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
815 gdt->sc_ic_all_size) {
816 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
817 gdt->sc_hanum));
818 return (NULL);
819 }
820
821 bzero(gdt->sc_cmd, GDT_CMD_SZ);
822
823 gccb = gdt_get_ccb(gdt);
824 if (gccb == NULL) {
825 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
826 gdt->sc_hanum));
827 return (gccb);
828 }
829 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
830 gccb->gc_ccb = ccb;
831 gccb->gc_service = GDT_SCSIRAWSERVICE;
832 gccb->gc_flags = GDT_GCF_SCSI;
833
834 if (gdt->sc_cmd_cnt == 0)
835 gdt->sc_set_sema0(gdt);
836 splx(*lock);
837 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
838 gccb->gc_cmd_index);
839 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
840
841 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
842 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
843 GDT_DATA_IN : GDT_DATA_OUT);
844 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
845 ccb->csio.dxfer_len);
846 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
847 ccb->csio.cdb_len);
848 bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
849 ccb->csio.cdb_len);
850 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
851 ccb->ccb_h.target_id;
852 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
853 ccb->ccb_h.target_lun;
854 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
855 cam_sim_bus(sim);
856 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
857 sizeof(struct scsi_sense_data));
858 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
859 gdt_ccb_vtop(gdt, gccb) +
860 offsetof(struct gdt_ccb, gc_scratch[0]));
861
862 /*
863 * If we have any data to send with this command,
864 * map it into bus space.
865 */
866 /* Only use S/G if there is a transfer */
867 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
868 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
869 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
870 int s;
871 int error;
872
873 /* unlock splcam() before this ??? */
874 s = splsoftvm();
875 error =
876 bus_dmamap_load(gdt->sc_buffer_dmat,
877 gccb->gc_dmamap,
878 ccb->csio.data_ptr,
879 ccb->csio.dxfer_len,
880 gdtexecuteccb,
881 gccb, /*flags*/0);
882 if (error == EINPROGRESS) {
883 xpt_freeze_simq(sim, 1);
884 gccb->gc_state |= CAM_RELEASE_SIMQ;
885 }
886 splx(s);
887 } else {
888 struct bus_dma_segment seg;
889
890 /* Pointer to physical buffer */
891 seg.ds_addr =
892 (bus_addr_t)ccb->csio.data_ptr;
893 seg.ds_len = ccb->csio.dxfer_len;
894 gdtexecuteccb(gccb, &seg, 1, 0);
895 }
896 } else {
897 struct bus_dma_segment *segs;
898
899 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
900 panic("iir%d: iir_action - Physical "
901 "segment pointers unsupported", gdt->sc_hanum);
902
903 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
904 panic("iir%d: iir_action - Virtual "
905 "segment addresses unsupported", gdt->sc_hanum);
906
907 /* Just use the segments provided */
908 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
909 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
910 }
911 } else {
912 gdtexecuteccb(gccb, NULL, 0, 0);
913 }
914
915 *lock = splcam();
916 return (gccb);
917}
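/*
 * Illustrative aside: the DPMEM overflow test at the top of gdt_raw_cmd()
 * rounds the command length up to a 32-bit multiple and checks it against
 * the remaining shared-memory window.  A standalone worked example of that
 * arithmetic follows; all sizes are invented and only stand in for
 * GDT_DPMEM_COMMAND_OFFSET, sc_cmd_off and sc_ic_all_size.
 */
#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
    uint32_t cmd_len   = ROUNDUP(30, sizeof(uint32_t)); /* 30 -> 32 */
    uint32_t cmd_off   = 64;    /* bytes already queued this round */
    uint32_t dpmem_off = 16;    /* stand-in for GDT_DPMEM_COMMAND_OFFSET */
    uint32_t window    = 4096;  /* stand-in for sc_ic_all_size */

    if (cmd_off + cmd_len + dpmem_off > window)
        printf("DPMEM overflow\n");
    else
        printf("command fits: %u of %u bytes in use\n",
               cmd_off + cmd_len + dpmem_off, window);
    return (0);
}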
918
919static struct gdt_ccb *
920gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
921{
922 struct gdt_ccb *gccb;
923 struct cam_sim *sim;
924 u_int8_t *cmdp;
925 u_int16_t opcode;
926 u_int32_t blockno, blockcnt;
927
928 GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
929
930 if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
931 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
932 gdt->sc_ic_all_size) {
933 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
934 gdt->sc_hanum));
935 return (NULL);
936 }
937
938 bzero(gdt->sc_cmd, GDT_CMD_SZ);
939
940 gccb = gdt_get_ccb(gdt);
941 if (gccb == NULL) {
942 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
943 gdt->sc_hanum));
944 return (gccb);
945 }
946 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
947 gccb->gc_ccb = ccb;
948 gccb->gc_service = GDT_CACHESERVICE;
949 gccb->gc_flags = GDT_GCF_SCSI;
950
951 if (gdt->sc_cmd_cnt == 0)
952 gdt->sc_set_sema0(gdt);
953 splx(*lock);
954 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
955 gccb->gc_cmd_index);
956 cmdp = ccb->csio.cdb_io.cdb_bytes;
957 opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
958 if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
959 opcode = GDT_WRITE_THR;
960 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
961
962 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
963 ccb->ccb_h.target_id);
964 if (ccb->csio.cdb_len == 6) {
965 struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
966 blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
967 blockcnt = rw->length ? rw->length : 0x100;
968 } else {
969 struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
970 blockno = scsi_4btoul(rw->addr);
971 blockcnt = scsi_2btoul(rw->length);
972 }
973 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
974 blockno);
975 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
976 blockcnt);
977
978 /*
979 * If we have any data to send with this command,
980 * map it into bus space.
981 */
982 /* Only use S/G if there is a transfer */
983 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
984 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
985 int s;
986 int error;
987
988 /* unlock splcam() before this ??? */
989 s = splsoftvm();
990 error =
991 bus_dmamap_load(gdt->sc_buffer_dmat,
992 gccb->gc_dmamap,
993 ccb->csio.data_ptr,
994 ccb->csio.dxfer_len,
995 gdtexecuteccb,
996 gccb, /*flags*/0);
997 if (error == EINPROGRESS) {
998 xpt_freeze_simq(sim, 1);
999 gccb->gc_state |= CAM_RELEASE_SIMQ;
1000 }
1001 splx(s);
1002 } else {
1003 struct bus_dma_segment seg;
1004
1005 /* Pointer to physical buffer */
1006 seg.ds_addr =
1007 (bus_addr_t)ccb->csio.data_ptr;
1008 seg.ds_len = ccb->csio.dxfer_len;
1009 gdtexecuteccb(gccb, &seg, 1, 0);
1010 }
1011 } else {
1012 struct bus_dma_segment *segs;
1013
1014 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
1015 panic("iir%d: iir_action - Physical "
1016 "segment pointers unsupported", gdt->sc_hanum);
1017
1018 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
1019 panic("iir%d: iir_action - Virtual "
1020 "segment addresses unsupported", gdt->sc_hanum);
1021
1022 /* Just use the segments provided */
1023 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1024 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
1025 }
1026
1027 *lock = splcam();
1028 return (gccb);
1029}
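/*
 * Illustrative aside: gdt_cache_cmd() above pulls the LBA and block count
 * straight out of the CDB.  READ(6)/WRITE(6) carry a 21-bit LBA and an 8-bit
 * length where 0 means 256 blocks; READ(10)/WRITE(10) carry a 32-bit LBA and
 * a 16-bit length, both big-endian.  The standalone decoder below mirrors
 * that logic with plain byte arithmetic instead of the scsi_3btoul()/
 * scsi_4btoul() helpers; the function name is invented.
 */
#include <stdint.h>

static void
decode_rw_cdb(const uint8_t *cdb, int cdb_len,
              uint32_t *blockno, uint32_t *blockcnt)
{
    if (cdb_len == 6) {
        /* READ(6)/WRITE(6): LBA in the low 5 bits of byte 1 plus bytes 2-3 */
        *blockno = ((uint32_t)(cdb[1] & 0x1f) << 16) |
                   ((uint32_t)cdb[2] << 8) | cdb[3];
        /* a transfer length of 0 means 256 blocks */
        *blockcnt = cdb[4] ? cdb[4] : 0x100;
    } else {
        /* READ(10)/WRITE(10): LBA in bytes 2-5, length in bytes 7-8 */
        *blockno = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
                   ((uint32_t)cdb[4] << 8)  | cdb[5];
        *blockcnt = ((uint32_t)cdb[7] << 8) | cdb[8];
    }
}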
1030
1031static struct gdt_ccb *
1032gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
1033{
1034 struct gdt_ccb *gccb;
1035 u_int32_t cnt;
1036
1037 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1038
1039 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1040
1041 gccb = gdt_get_ccb(gdt);
1042 if (gccb == NULL) {
1043 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1044 gdt->sc_hanum));
1045 return (gccb);
1046 }
1047 gccb->gc_ucmd = ucmd;
1048 gccb->gc_service = ucmd->service;
1049 gccb->gc_flags = GDT_GCF_IOCTL;
1050
1051 /* check DPMEM space, copy data buffer from user space */
1052 if (ucmd->service == GDT_CACHESERVICE) {
1053 if (ucmd->OpCode == GDT_IOCTL) {
1054 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1055 sizeof(u_int32_t));
1056 cnt = ucmd->u.ioctl.param_size;
1057 if (cnt > GDT_SCRATCH_SZ) {
1058 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1059 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1060 gdt_free_ccb(gdt, gccb);
1061 return (NULL);
1062 }
1063 } else {
1064 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1065 GDT_SG_SZ, sizeof(u_int32_t));
1066 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1067 if (cnt > GDT_SCRATCH_SZ) {
1068 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1069 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1070 gdt_free_ccb(gdt, gccb);
1071 return (NULL);
1072 }
1073 }
1074 } else {
1075 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1076 GDT_SG_SZ, sizeof(u_int32_t));
1077 cnt = ucmd->u.raw.sdlen;
1078 if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1079 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1080 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1081 gdt_free_ccb(gdt, gccb);
1082 return (NULL);
1083 }
1084 }
1085 if (cnt != 0)
1086 bcopy(ucmd->data, gccb->gc_scratch, cnt);
1087
1088 if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1089 gdt->sc_ic_all_size) {
1090 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1091 gdt->sc_hanum));
1092 gdt_free_ccb(gdt, gccb);
1093 return (NULL);
1094 }
1095
1096 if (gdt->sc_cmd_cnt == 0)
1097 gdt->sc_set_sema0(gdt);
1098 splx(*lock);
1099
1100 /* fill cmd structure */
1101 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1102 gccb->gc_cmd_index);
1103 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1104 ucmd->OpCode);
1105
1106 if (ucmd->service == GDT_CACHESERVICE) {
1107 if (ucmd->OpCode == GDT_IOCTL) {
1108 /* IOCTL */
1109 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1110 ucmd->u.ioctl.param_size);
1111 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1112 ucmd->u.ioctl.subfunc);
1113 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1114 ucmd->u.ioctl.channel);
1115 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1116 gdt_ccb_vtop(gdt, gccb) +
1117 offsetof(struct gdt_ccb, gc_scratch[0]));
1118 } else {
1119 /* cache service command */
1120 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1121 ucmd->u.cache.DeviceNo);
1122 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1123 ucmd->u.cache.BlockNo);
1124 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1125 ucmd->u.cache.BlockCnt);
1126 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1127 0xffffffffUL);
1128 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1129 1);
1130 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1131 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1132 offsetof(struct gdt_ccb, gc_scratch[0]));
1133 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1134 GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1135 }
1136 } else {
1137 /* raw service command */
1138 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1139 ucmd->u.raw.direction);
1140 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1141 0xffffffffUL);
1142 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1143 ucmd->u.raw.sdlen);
1144 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1145 ucmd->u.raw.clen);
1146 bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1147 12);
1148 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1149 ucmd->u.raw.target;
1150 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1151 ucmd->u.raw.lun;
1152 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1153 ucmd->u.raw.bus;
1154 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1155 ucmd->u.raw.sense_len);
1156 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1157 gdt_ccb_vtop(gdt, gccb) +
1158 offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1159 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1160 1);
1161 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1162 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1163 offsetof(struct gdt_ccb, gc_scratch[0]));
1164 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1165 GDT_SG_LEN, ucmd->u.raw.sdlen);
1166 }
1167
1168 *lock = splcam();
1169 gdt_stat.sg_count_act = 1;
1170 gdt->sc_copy_cmd(gdt, gccb);
1171 return (gccb);
1172}
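/*
 * Illustrative aside: for raw-service ioctls the scratch area holds the data
 * buffer first and the sense buffer directly behind it (note the
 * gc_scratch[ucmd->u.raw.sdlen] offset above), which is why the size check
 * tests sdlen + sense_len against GDT_SCRATCH_SZ.  The numbers below are
 * invented; they only demonstrate the layout and the overflow test.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    const uint32_t scratch_sz = 4096;   /* stand-in for GDT_SCRATCH_SZ */
    uint32_t sdlen = 3072, sense_len = 252;

    if (sdlen + sense_len > scratch_sz)
        printf("Scratch buffer too small (%u/%u)\n",
               scratch_sz, sdlen + sense_len);
    else
        printf("data at [0,%u), sense at [%u,%u)\n",
               sdlen, sdlen, sdlen + sense_len);
    return (0);
}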
1173
1174static void
1175gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1176{
1177 int t;
1178
1179 t = ccb->ccb_h.target_id;
1180 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1181 gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1182
1183 switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1184 case TEST_UNIT_READY:
1185 case START_STOP:
1186 break;
1187 case REQUEST_SENSE:
1188 GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1189 break;
1190 case INQUIRY:
1191 {
1192 struct scsi_inquiry_data *inq;
1193
1194 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1195 bzero(inq, sizeof(struct scsi_inquiry_data));
1196 inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1197 T_CDROM : T_DIRECT;
1198 inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1199 inq->version = SCSI_REV_2;
1200 inq->response_format = 2;
1201 inq->additional_length = 32;
1202 inq->flags = SID_CmdQue | SID_Sync;
1183 strcpy(inq->vendor, "IIR ");
1203 strcpy(inq->vendor, gdt->oem_name);
1184 sprintf(inq->product, "Host Drive #%02d", t);
1185 strcpy(inq->revision, " ");
1186 break;
1187 }
1188 case MODE_SENSE_6:
1189 {
1190 struct mpd_data {
1191 struct scsi_mode_hdr_6 hd;
1192 struct scsi_mode_block_descr bd;
1193 struct scsi_control_page cp;
1194 } *mpd;
1195 u_int8_t page;
1196
1197 mpd = (struct mpd_data *)ccb->csio.data_ptr;
1198 bzero(mpd, sizeof(struct mpd_data));
1199 mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1200 sizeof(struct scsi_mode_block_descr);
1201 mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1202 mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1203 mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1204 mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1205 mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1206 page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1207 switch (page) {
1208 default:
1209 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1210 break;
1211 }
1212 break;
1213 }
1214 case READ_CAPACITY:
1215 {
1216 struct scsi_read_capacity_data *rcd;
1217
1218 rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1219 bzero(rcd, sizeof(struct scsi_read_capacity_data));
1220 scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1221 scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1222 break;
1223 }
1224 default:
1225 GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1226 ccb->csio.cdb_io.cdb_bytes[0]));
1227 break;
1228 }
1229 ccb->ccb_h.status = CAM_REQ_CMP;
1230 --gdt_stat.io_count_act;
1231 xpt_done(ccb);
1232}
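/*
 * Illustrative aside: the READ_CAPACITY branch above reports the address of
 * the last block (hd_size - 1) and the block length, both as big-endian
 * 32-bit values.  The sketch below shows the byte packing scsi_ulto4b()
 * performs plus a worked example; ulto4b() is a local stand-in, not the CAM
 * helper itself, and the 512-byte sector size is only an assumption.
 */
#include <stdint.h>
#include <stdio.h>

static void
ulto4b(uint32_t val, uint8_t *bytes)
{
    bytes[0] = (val >> 24) & 0xff;
    bytes[1] = (val >> 16) & 0xff;
    bytes[2] = (val >> 8) & 0xff;
    bytes[3] = val & 0xff;
}

int
main(void)
{
    uint8_t addr[4], length[4];
    uint32_t hd_size = 4194304;    /* e.g. a 2 GB drive in 512-byte sectors */

    ulto4b(hd_size - 1, addr);     /* last LBA = 4194303 = 0x003fffff */
    ulto4b(512, length);           /* assumed sector size */

    printf("last LBA bytes: %02x %02x %02x %02x\n",
           addr[0], addr[1], addr[2], addr[3]);
    return (0);
}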
1233
1234static void
1235gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1236{
1237 bus_addr_t *busaddrp;
1238
1239 busaddrp = (bus_addr_t *)arg;
1240 *busaddrp = dm_segs->ds_addr;
1241}
1242
1243static void
1244gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1245{
1246 struct gdt_ccb *gccb;
1247 union ccb *ccb;
1248 struct gdt_softc *gdt;
1249 int i, lock;
1250
1251 lock = splcam();
1252
1253 gccb = (struct gdt_ccb *)arg;
1254 ccb = gccb->gc_ccb;
1255 gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1256
1257 GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1258 gdt, gccb, dm_segs, nseg, error));
1259 gdt_stat.sg_count_act = nseg;
1260 if (nseg > gdt_stat.sg_count_max)
1261 gdt_stat.sg_count_max = nseg;
1262
1263 /* Copy the segments into our SG list */
1264 if (gccb->gc_service == GDT_CACHESERVICE) {
1265 for (i = 0; i < nseg; ++i) {
1266 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1267 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1268 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1269 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1270 dm_segs++;
1271 }
1272 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1273 nseg);
1274 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1275 0xffffffffUL);
1276
1277 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1278 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1279 } else {
1280 for (i = 0; i < nseg; ++i) {
1281 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1282 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1283 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1284 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1285 dm_segs++;
1286 }
1287 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1288 nseg);
1289 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1290 0xffffffffUL);
1291
1292 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1293 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1294 }
1295
1296 if (nseg != 0) {
1204 sprintf(inq->product, "Host Drive #%02d", t);
1205 strcpy(inq->revision, " ");
1206 break;
1207 }
1208 case MODE_SENSE_6:
1209 {
1210 struct mpd_data {
1211 struct scsi_mode_hdr_6 hd;
1212 struct scsi_mode_block_descr bd;
1213 struct scsi_control_page cp;
1214 } *mpd;
1215 u_int8_t page;
1216
1217 mpd = (struct mpd_data *)ccb->csio.data_ptr;
1218 bzero(mpd, sizeof(struct mpd_data));
1219 mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1220 sizeof(struct scsi_mode_block_descr);
1221 mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1222 mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1223 mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1224 mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1225 mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1226 page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1227 switch (page) {
1228 default:
1229 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1230 break;
1231 }
1232 break;
1233 }
1234 case READ_CAPACITY:
1235 {
1236 struct scsi_read_capacity_data *rcd;
1237
1238 rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1239 bzero(rcd, sizeof(struct scsi_read_capacity_data));
1240 scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1241 scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1242 break;
1243 }
1244 default:
1245 GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1246 ccb->csio.cdb_io.cdb_bytes[0]));
1247 break;
1248 }
1249 ccb->ccb_h.status = CAM_REQ_CMP;
1250 --gdt_stat.io_count_act;
1251 xpt_done(ccb);
1252}
1253
1254static void
1255gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1256{
1257 bus_addr_t *busaddrp;
1258
1259 busaddrp = (bus_addr_t *)arg;
1260 *busaddrp = dm_segs->ds_addr;
1261}
1262
1263static void
1264gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1265{
1266 struct gdt_ccb *gccb;
1267 union ccb *ccb;
1268 struct gdt_softc *gdt;
1269 int i, lock;
1270
1271 lock = splcam();
1272
1273 gccb = (struct gdt_ccb *)arg;
1274 ccb = gccb->gc_ccb;
1275 gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1276
1277 GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1278 gdt, gccb, dm_segs, nseg, error));
1279 gdt_stat.sg_count_act = nseg;
1280 if (nseg > gdt_stat.sg_count_max)
1281 gdt_stat.sg_count_max = nseg;
1282
1283 /* Copy the segments into our SG list */
1284 if (gccb->gc_service == GDT_CACHESERVICE) {
1285 for (i = 0; i < nseg; ++i) {
1286 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1287 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1288 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1289 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1290 dm_segs++;
1291 }
1292 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1293 nseg);
1294 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1295 0xffffffffUL);
1296
1297 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1298 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1299 } else {
1300 for (i = 0; i < nseg; ++i) {
1301 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1302 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1303 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1304 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1305 dm_segs++;
1306 }
1307 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1308 nseg);
1309 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1310 0xffffffffUL);
1311
1312 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1313 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1314 }
1315
1316 if (nseg != 0) {
1297 int op;
1298
1299 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1300 op = BUS_DMASYNC_PREREAD;
1301 else
1302 op = BUS_DMASYNC_PREWRITE;
1303 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1317 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1318 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1319 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1304 }
1305
1306 /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
1307 * because command semaphore is already set!
1308 */
1309
1310 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1311 /* timeout handling */
1312 ccb->ccb_h.timeout_ch =
1313 timeout(iir_timeout, (caddr_t)gccb,
1314 (ccb->ccb_h.timeout * hz) / 1000);
1315
1316 gdt->sc_copy_cmd(gdt, gccb);
1317 splx(lock);
1318}
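/*
 * Illustrative aside: gdtexecuteccb() above turns each DMA segment into an
 * 8-byte {pointer, length} entry inside the command's scatter/gather list
 * and then stores the entry count.  The simplified sketch below shows that
 * fill pattern; SG_ENTRY_SZ/SG_PTR_OFF/SG_LEN_OFF and struct dma_seg are
 * invented stand-ins for GDT_SG_SZ, GDT_SG_PTR, GDT_SG_LEN and
 * bus_dma_segment_t.
 */
#include <stdint.h>

#define SG_ENTRY_SZ 8
#define SG_PTR_OFF  0
#define SG_LEN_OFF  4

struct dma_seg {
    uint32_t addr;
    uint32_t len;
};

static void
put_le32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)v;
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
}

static void
fill_sg_list(uint8_t *sg_list, const struct dma_seg *segs, int nseg)
{
    int i;

    for (i = 0; i < nseg; i++) {
        put_le32(sg_list + i * SG_ENTRY_SZ + SG_PTR_OFF, segs[i].addr);
        put_le32(sg_list + i * SG_ENTRY_SZ + SG_LEN_OFF, segs[i].len);
    }
}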
1319
1320
1321static void
1322iir_action( struct cam_sim *sim, union ccb *ccb )
1323{
1324 struct gdt_softc *gdt;
1325 int lock, bus, target, lun;
1326
1327 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1328 ccb->ccb_h.ccb_sim_ptr = sim;
1329 bus = cam_sim_bus(sim);
1330 target = ccb->ccb_h.target_id;
1331 lun = ccb->ccb_h.target_lun;
1332 GDT_DPRINTF(GDT_D_CMD,
1333 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1334 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1335 bus, target, lun));
1336 ++gdt_stat.io_count_act;
1337 if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1338 gdt_stat.io_count_max = gdt_stat.io_count_act;
1339
1340 switch (ccb->ccb_h.func_code) {
1341 case XPT_SCSI_IO:
1342 lock = splcam();
1343 TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1344 ++gdt_stat.req_queue_act;
1345 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1346 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1347 splx(lock);
1348 gdt_next(gdt);
1349 break;
1350 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1351 case XPT_ABORT: /* Abort the specified CCB */
1352 /* XXX Implement */
1353 ccb->ccb_h.status = CAM_REQ_INVALID;
1354 --gdt_stat.io_count_act;
1355 xpt_done(ccb);
1356 break;
1357 case XPT_SET_TRAN_SETTINGS:
1358 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1359 --gdt_stat.io_count_act;
1360 xpt_done(ccb);
1361 break;
1362 case XPT_GET_TRAN_SETTINGS:
1363 /* Get default/user set transfer settings for the target */
1364 {
1365 struct ccb_trans_settings *cts;
1366 u_int target_mask;
1367
1368 cts = &ccb->cts;
1369 target_mask = 0x01 << target;
1370 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1371 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
1372 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1373 cts->sync_period = 25; /* 10MHz */
1374 if (cts->sync_period != 0)
1375 cts->sync_offset = 15;
1376
1377 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1378 | CCB_TRANS_SYNC_OFFSET_VALID
1379 | CCB_TRANS_BUS_WIDTH_VALID
1380 | CCB_TRANS_DISC_VALID
1381 | CCB_TRANS_TQ_VALID;
1382 ccb->ccb_h.status = CAM_REQ_CMP;
1383 } else {
1384 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1385 }
1386 --gdt_stat.io_count_act;
1387 xpt_done(ccb);
1388 break;
1389 }
1390 case XPT_CALC_GEOMETRY:
1391 {
1392 struct ccb_calc_geometry *ccg;
1393 u_int32_t secs_per_cylinder;
1394
1395 ccg = &ccb->ccg;
1396 ccg->heads = gdt->sc_hdr[target].hd_heads;
1397 ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1398 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1399 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1400 ccb->ccb_h.status = CAM_REQ_CMP;
1401 --gdt_stat.io_count_act;
1402 xpt_done(ccb);
1403 break;
1404 }
1405 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1406 {
1407 /* XXX Implement */
1408 ccb->ccb_h.status = CAM_REQ_CMP;
1409 --gdt_stat.io_count_act;
1410 xpt_done(ccb);
1411 break;
1412 }
1413 case XPT_TERM_IO: /* Terminate the I/O process */
1414 /* XXX Implement */
1415 ccb->ccb_h.status = CAM_REQ_INVALID;
1416 --gdt_stat.io_count_act;
1417 xpt_done(ccb);
1418 break;
1419 case XPT_PATH_INQ: /* Path routing inquiry */
1420 {
1421 struct ccb_pathinq *cpi = &ccb->cpi;
1422
1423 cpi->version_num = 1;
1424 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1425 cpi->hba_inquiry |= PI_WIDE_16;
1426 cpi->target_sprt = 1;
1427 cpi->hba_misc = 0;
1428 cpi->hba_eng_cnt = 0;
1429 if (bus == gdt->sc_virt_bus)
1430 cpi->max_target = GDT_MAX_HDRIVES - 1;
1431 else if (gdt->sc_class & GDT_FC)
1432 cpi->max_target = GDT_MAXID_FC - 1;
1433 else
1434 cpi->max_target = GDT_MAXID - 1;
1435 cpi->max_lun = 7;
1436 cpi->unit_number = cam_sim_unit(sim);
1437 cpi->bus_id = bus;
1438 cpi->initiator_id =
1439 (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1440 cpi->base_transfer_speed = 3300;
1441 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1442 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1443 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1444 cpi->ccb_h.status = CAM_REQ_CMP;
1445 --gdt_stat.io_count_act;
1446 xpt_done(ccb);
1447 break;
1448 }
1449 default:
1450 GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1451 gdt, ccb->ccb_h.func_code));
1452 ccb->ccb_h.status = CAM_REQ_INVALID;
1453 --gdt_stat.io_count_act;
1454 xpt_done(ccb);
1455 break;
1456 }
1457}
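/*
 * Illustrative aside: the XPT_CALC_GEOMETRY case above derives the cylinder
 * count from the head and sectors-per-track values the controller reports
 * per host drive.  Worked example with made-up values (255 heads, 63 sectors
 * per track, 4194304-sector volume):
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t heads = 255, secs_per_track = 63;
    uint32_t volume_size = 4194304;                        /* sectors */
    uint32_t secs_per_cylinder = heads * secs_per_track;   /* 16065 */
    uint32_t cylinders = volume_size / secs_per_cylinder;  /* 261 */

    printf("%u cylinders\n", cylinders);
    return (0);
}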
1458
1459static void
1460iir_poll( struct cam_sim *sim )
1461{
1462 struct gdt_softc *gdt;
1463
1464 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1465 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1466 iir_intr(gdt);
1467}
1468
1469static void
1470iir_timeout(void *arg)
1471{
1472 GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1473}
1474
1475static void
1476iir_watchdog(void *arg)
1477{
1478 struct gdt_softc *gdt;
1479
1480 gdt = (struct gdt_softc *)arg;
1481 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1482
1483 {
1484 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1485 struct gdt_ccb *p;
1486 struct ccb_hdr *h;
1487 struct gdt_ucmd *u;
1488
1489 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1490 h = TAILQ_NEXT(h, sim_links.tqe))
1491 ccbs++;
1492 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1493 u = TAILQ_NEXT(u, links))
1494 ucmds++;
1495 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1496 p = SLIST_NEXT(p, sle))
1497 frees++;
1498 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1499 p = SLIST_NEXT(p, sle))
1500 pends++;
1501
1502 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1503 ccbs, ucmds, frees, pends));
1504 }
1505
1506 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1507}
1508
1509static void
1510iir_shutdown( void *arg, int howto )
1511{
1512 struct gdt_softc *gdt;
1513 struct gdt_ccb *gccb;
1514 gdt_ucmd_t *ucmd;
1515 int lock, i;
1516
1517 gdt = (struct gdt_softc *)arg;
1518 GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1519
1520 printf("iir%d: Flushing all Host Drives. Please wait ... ",
1521 gdt->sc_hanum);
1522
1523 /* allocate ucmd buffer */
1524 ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
1525 if (ucmd == NULL) {
1526 printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
1527 gdt->sc_hanum);
1528 return;
1529 }
1530 bzero(ucmd, sizeof(gdt_ucmd_t));
1531
1532 /* wait for pending IOs */
1533 lock = splcam();
1534 gdt->sc_state = GDT_SHUTDOWN;
1535 splx(lock);
1536 if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1537 (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);
1538
1539 /* flush */
1540 for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1541 if (gdt->sc_hdr[i].hd_present) {
1542 ucmd->service = GDT_CACHESERVICE;
1543 ucmd->OpCode = GDT_FLUSH;
1544 ucmd->u.cache.DeviceNo = i;
1545 lock = splcam();
1546 TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1547 ucmd->complete_flag = FALSE;
1548 splx(lock);
1549 gdt_next(gdt);
1550 if (!ucmd->complete_flag)
1551 (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
1552 }
1553 }
1554
1555 free(ucmd, M_DEVBUF);
1556 printf("Done.\n");
1557}
1558
1559void
1560iir_intr(void *arg)
1561{
1562 struct gdt_softc *gdt = arg;
1563 struct gdt_intr_ctx ctx;
1564 int lock = 0;
1565 struct gdt_ccb *gccb;
1566 gdt_ucmd_t *ucmd;
1567 u_int32_t cnt;
1568
1569 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1570
1571 /* If polling and we were not called from gdt_wait, just return */
1572 if ((gdt->sc_state & GDT_POLLING) &&
1573 !(gdt->sc_state & GDT_POLL_WAIT))
1574 return;
1575
1576 if (!(gdt->sc_state & GDT_POLLING))
1577 lock = splcam();
1578 gdt_wait_index = 0;
1579
1580 ctx.istatus = gdt->sc_get_status(gdt);
1581 if (!ctx.istatus) {
1582 if (!(gdt->sc_state & GDT_POLLING))
1583 splx(lock);
1584 gdt->sc_status = GDT_S_NO_STATUS;
1585 return;
1586 }
1587
1588 gdt->sc_intr(gdt, &ctx);
1589
1590 gdt->sc_status = ctx.cmd_status;
1591 gdt->sc_service = ctx.service;
1592 gdt->sc_info = ctx.info;
1593 gdt->sc_info2 = ctx.info2;
1594
1595 if (gdt->sc_state & GDT_POLL_WAIT) {
1596 gdt_wait_gdt = gdt;
1597 gdt_wait_index = ctx.istatus;
1598 }
1599
1600 if (ctx.istatus == GDT_ASYNCINDEX) {
1601 gdt_async_event(gdt, ctx.service);
1602 if (!(gdt->sc_state & GDT_POLLING))
1603 splx(lock);
1604 return;
1605 }
1606 if (ctx.istatus == GDT_SPEZINDEX) {
1607 GDT_DPRINTF(GDT_D_INVALID,
1608 ("iir%d: Service unknown or not initialized!\n",
1609 gdt->sc_hanum));
1610 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1611 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1612 gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1613 if (!(gdt->sc_state & GDT_POLLING))
1614 splx(lock);
1615 return;
1616 }
1617
1618 gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1619 ctx.service = gccb->gc_service;
1620
1621 switch (gccb->gc_flags) {
1622 case GDT_GCF_UNUSED:
1623 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1624 gdt->sc_hanum, ctx.istatus));
1625 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1626 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1627 gdt->sc_dvr.eu.driver.index = ctx.istatus;
1628 gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1629 gdt_free_ccb(gdt, gccb);
1630 /* fallthrough */
1631
1632 case GDT_GCF_INTERNAL:
1633 if (!(gdt->sc_state & GDT_POLLING))
1634 splx(lock);
1635 break;
1636
1637 case GDT_GCF_IOCTL:
1638 ucmd = gccb->gc_ucmd;
1639 if (gdt->sc_status == GDT_S_BSY) {
1640 GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1641 gdt, gccb));
1642 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1643 if (!(gdt->sc_state & GDT_POLLING))
1644 splx(lock);
1645 } else {
1646 ucmd->status = gdt->sc_status;
1647 ucmd->info = gdt->sc_info;
1648 ucmd->complete_flag = TRUE;
1649 if (ucmd->service == GDT_CACHESERVICE) {
1650 if (ucmd->OpCode == GDT_IOCTL) {
1651 cnt = ucmd->u.ioctl.param_size;
1652 if (cnt != 0)
1653 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1654 } else {
1655 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1656 if (cnt != 0)
1657 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1658 }
1659 } else {
1660 cnt = ucmd->u.raw.sdlen;
1661 if (cnt != 0)
1662 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1663 if (ucmd->u.raw.sense_len != 0)
1664 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1665 }
1666 gdt_free_ccb(gdt, gccb);
1667 if (!(gdt->sc_state & GDT_POLLING))
1668 splx(lock);
1669 /* wakeup */
1670 wakeup(ucmd);
1671 }
1672 gdt_next(gdt);
1673 break;
1674
1675 default:
1676 gdt_free_ccb(gdt, gccb);
1677 gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1678 if (!(gdt->sc_state & GDT_POLLING))
1679 splx(lock);
1680 gdt_next(gdt);
1681 break;
1682 }
1683}
1684
1320 }
1321
1322 /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
1323 * because command semaphore is already set!
1324 */
1325
1326 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1327 /* timeout handling */
1328 ccb->ccb_h.timeout_ch =
1329 timeout(iir_timeout, (caddr_t)gccb,
1330 (ccb->ccb_h.timeout * hz) / 1000);
1331
1332 gdt->sc_copy_cmd(gdt, gccb);
1333 splx(lock);
1334}
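/*
 * Illustrative aside: the CCB timeout handed to timeout(iir_timeout, ...)
 * above is kept in milliseconds by CAM, while timeout(9) wants ticks, hence
 * the (ccb->ccb_h.timeout * hz) / 1000 conversion.  Worked example; hz is a
 * kernel global and the value 100 below is only an assumption.
 */
#include <stdio.h>

int
main(void)
{
    int hz = 100;                           /* assumed ticks per second */
    int timeout_ms = 60000;                 /* a typical 60 s CCB timeout */
    int ticks = (timeout_ms * hz) / 1000;   /* 6000 ticks */

    printf("%d ms -> %d ticks\n", timeout_ms, ticks);
    return (0);
}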
1335
1336
1337static void
1338iir_action( struct cam_sim *sim, union ccb *ccb )
1339{
1340 struct gdt_softc *gdt;
1341 int lock, bus, target, lun;
1342
1343 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1344 ccb->ccb_h.ccb_sim_ptr = sim;
1345 bus = cam_sim_bus(sim);
1346 target = ccb->ccb_h.target_id;
1347 lun = ccb->ccb_h.target_lun;
1348 GDT_DPRINTF(GDT_D_CMD,
1349 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1350 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1351 bus, target, lun));
1352 ++gdt_stat.io_count_act;
1353 if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1354 gdt_stat.io_count_max = gdt_stat.io_count_act;
1355
1356 switch (ccb->ccb_h.func_code) {
1357 case XPT_SCSI_IO:
1358 lock = splcam();
1359 TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1360 ++gdt_stat.req_queue_act;
1361 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1362 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1363 splx(lock);
1364 gdt_next(gdt);
1365 break;
1366 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1367 case XPT_ABORT: /* Abort the specified CCB */
1368 /* XXX Implement */
1369 ccb->ccb_h.status = CAM_REQ_INVALID;
1370 --gdt_stat.io_count_act;
1371 xpt_done(ccb);
1372 break;
1373 case XPT_SET_TRAN_SETTINGS:
1374 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1375 --gdt_stat.io_count_act;
1376 xpt_done(ccb);
1377 break;
1378 case XPT_GET_TRAN_SETTINGS:
1379 /* Get default/user set transfer settings for the target */
1380 {
1381 struct ccb_trans_settings *cts;
1382 u_int target_mask;
1383
1384 cts = &ccb->cts;
1385 target_mask = 0x01 << target;
1386 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1387 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
1388 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1389 cts->sync_period = 25; /* 10MHz */
1390 if (cts->sync_period != 0)
1391 cts->sync_offset = 15;
1392
1393 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1394 | CCB_TRANS_SYNC_OFFSET_VALID
1395 | CCB_TRANS_BUS_WIDTH_VALID
1396 | CCB_TRANS_DISC_VALID
1397 | CCB_TRANS_TQ_VALID;
1398 ccb->ccb_h.status = CAM_REQ_CMP;
1399 } else {
1400 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1401 }
1402 --gdt_stat.io_count_act;
1403 xpt_done(ccb);
1404 break;
1405 }
1406 case XPT_CALC_GEOMETRY:
1407 {
1408 struct ccb_calc_geometry *ccg;
1409 u_int32_t secs_per_cylinder;
1410
1411 ccg = &ccb->ccg;
1412 ccg->heads = gdt->sc_hdr[target].hd_heads;
1413 ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1414 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1415 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1416 ccb->ccb_h.status = CAM_REQ_CMP;
1417 --gdt_stat.io_count_act;
1418 xpt_done(ccb);
1419 break;
1420 }
1421 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1422 {
1423 /* XXX Implement */
1424 ccb->ccb_h.status = CAM_REQ_CMP;
1425 --gdt_stat.io_count_act;
1426 xpt_done(ccb);
1427 break;
1428 }
1429 case XPT_TERM_IO: /* Terminate the I/O process */
1430 /* XXX Implement */
1431 ccb->ccb_h.status = CAM_REQ_INVALID;
1432 --gdt_stat.io_count_act;
1433 xpt_done(ccb);
1434 break;
1435 case XPT_PATH_INQ: /* Path routing inquiry */
1436 {
1437 struct ccb_pathinq *cpi = &ccb->cpi;
1438
1439 cpi->version_num = 1;
1440 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1441 cpi->hba_inquiry |= PI_WIDE_16;
1442 cpi->target_sprt = 1;
1443 cpi->hba_misc = 0;
1444 cpi->hba_eng_cnt = 0;
1445 if (bus == gdt->sc_virt_bus)
1446 cpi->max_target = GDT_MAX_HDRIVES - 1;
1447 else if (gdt->sc_class & GDT_FC)
1448 cpi->max_target = GDT_MAXID_FC - 1;
1449 else
1450 cpi->max_target = GDT_MAXID - 1;
1451 cpi->max_lun = 7;
1452 cpi->unit_number = cam_sim_unit(sim);
1453 cpi->bus_id = bus;
1454 cpi->initiator_id =
1455 (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1456 cpi->base_transfer_speed = 3300;
1457 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1458 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1459 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1460 cpi->ccb_h.status = CAM_REQ_CMP;
1461 --gdt_stat.io_count_act;
1462 xpt_done(ccb);
1463 break;
1464 }
1465 default:
1466 GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1467 gdt, ccb->ccb_h.func_code));
1468 ccb->ccb_h.status = CAM_REQ_INVALID;
1469 --gdt_stat.io_count_act;
1470 xpt_done(ccb);
1471 break;
1472 }
1473}
1474
1475static void
1476iir_poll( struct cam_sim *sim )
1477{
1478 struct gdt_softc *gdt;
1479
1480 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1481 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1482 iir_intr(gdt);
1483}
1484
1485static void
1486iir_timeout(void *arg)
1487{
1488 GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1489}
1490
1491static void
1492iir_watchdog(void *arg)
1493{
1494 struct gdt_softc *gdt;
1495
1496 gdt = (struct gdt_softc *)arg;
1497 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1498
1499 {
1500 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1501 struct gdt_ccb *p;
1502 struct ccb_hdr *h;
1503 struct gdt_ucmd *u;
1504
1505 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1506 h = TAILQ_NEXT(h, sim_links.tqe))
1507 ccbs++;
1508 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1509 u = TAILQ_NEXT(u, links))
1510 ucmds++;
1511 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1512 p = SLIST_NEXT(p, sle))
1513 frees++;
1514 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1515 p = SLIST_NEXT(p, sle))
1516 pends++;
1517
1518 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1519 ccbs, ucmds, frees, pends));
1520 }
1521
1522 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1523}
1524
1525static void
1526iir_shutdown( void *arg, int howto )
1527{
1528 struct gdt_softc *gdt;
1529 struct gdt_ccb *gccb;
1530 gdt_ucmd_t *ucmd;
1531 int lock, i;
1532
1533 gdt = (struct gdt_softc *)arg;
1534 GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1535
1536 printf("iir%d: Flushing all Host Drives. Please wait ... ",
1537 gdt->sc_hanum);
1538
1539 /* allocate ucmd buffer */
1540 ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
1541 if (ucmd == NULL) {
1542 printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
1543 gdt->sc_hanum);
1544 return;
1545 }
1546 bzero(ucmd, sizeof(gdt_ucmd_t));
1547
1548 /* wait for pending IOs */
1549 lock = splcam();
1550 gdt->sc_state = GDT_SHUTDOWN;
1551 splx(lock);
1552 if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1553 (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);
1554
1555 /* flush */
1556 for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1557 if (gdt->sc_hdr[i].hd_present) {
1558 ucmd->service = GDT_CACHESERVICE;
1559 ucmd->OpCode = GDT_FLUSH;
1560 ucmd->u.cache.DeviceNo = i;
1561 lock = splcam();
1562 TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1563 ucmd->complete_flag = FALSE;
1564 splx(lock);
1565 gdt_next(gdt);
1566 if (!ucmd->complete_flag)
1567 (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
1568 }
1569 }
1570
1571 free(ucmd, M_DEVBUF);
1572 printf("Done.\n");
1573}
1574
1575void
1576iir_intr(void *arg)
1577{
1578 struct gdt_softc *gdt = arg;
1579 struct gdt_intr_ctx ctx;
1580 int lock = 0;
1581 struct gdt_ccb *gccb;
1582 gdt_ucmd_t *ucmd;
1583 u_int32_t cnt;
1584
1585 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1586
1587 /* If polling and we were not called from gdt_wait, just return */
1588 if ((gdt->sc_state & GDT_POLLING) &&
1589 !(gdt->sc_state & GDT_POLL_WAIT))
1590 return;
1591
1592 if (!(gdt->sc_state & GDT_POLLING))
1593 lock = splcam();
1594 gdt_wait_index = 0;
1595
1596 ctx.istatus = gdt->sc_get_status(gdt);
1597 if (!ctx.istatus) {
1598 if (!(gdt->sc_state & GDT_POLLING))
1599 splx(lock);
1600 gdt->sc_status = GDT_S_NO_STATUS;
1601 return;
1602 }
1603
1604 gdt->sc_intr(gdt, &ctx);
1605
1606 gdt->sc_status = ctx.cmd_status;
1607 gdt->sc_service = ctx.service;
1608 gdt->sc_info = ctx.info;
1609 gdt->sc_info2 = ctx.info2;
1610
1611 if (gdt->sc_state & GDT_POLL_WAIT) {
1612 gdt_wait_gdt = gdt;
1613 gdt_wait_index = ctx.istatus;
1614 }
1615
1616 if (ctx.istatus == GDT_ASYNCINDEX) {
1617 gdt_async_event(gdt, ctx.service);
1618 if (!(gdt->sc_state & GDT_POLLING))
1619 splx(lock);
1620 return;
1621 }
1622 if (ctx.istatus == GDT_SPEZINDEX) {
1623 GDT_DPRINTF(GDT_D_INVALID,
1624 ("iir%d: Service unknown or not initialized!\n",
1625 gdt->sc_hanum));
1626 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1627 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1628 gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1629 if (!(gdt->sc_state & GDT_POLLING))
1630 splx(lock);
1631 return;
1632 }
1633
1634 gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1635 ctx.service = gccb->gc_service;
1636
1637 switch (gccb->gc_flags) {
1638 case GDT_GCF_UNUSED:
1639 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1640 gdt->sc_hanum, ctx.istatus));
1641 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1642 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1643 gdt->sc_dvr.eu.driver.index = ctx.istatus;
1644 gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1645 gdt_free_ccb(gdt, gccb);
1646 /* fallthrough */
1647
1648 case GDT_GCF_INTERNAL:
1649 if (!(gdt->sc_state & GDT_POLLING))
1650 splx(lock);
1651 break;
1652
1653 case GDT_GCF_IOCTL:
1654 ucmd = gccb->gc_ucmd;
1655 if (gdt->sc_status == GDT_S_BSY) {
1656 GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1657 gdt, gccb));
1658 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1659 if (!(gdt->sc_state & GDT_POLLING))
1660 splx(lock);
1661 } else {
1662 ucmd->status = gdt->sc_status;
1663 ucmd->info = gdt->sc_info;
1664 ucmd->complete_flag = TRUE;
1665 if (ucmd->service == GDT_CACHESERVICE) {
1666 if (ucmd->OpCode == GDT_IOCTL) {
1667 cnt = ucmd->u.ioctl.param_size;
1668 if (cnt != 0)
1669 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1670 } else {
1671 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1672 if (cnt != 0)
1673 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1674 }
1675 } else {
1676 cnt = ucmd->u.raw.sdlen;
1677 if (cnt != 0)
1678 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1679 if (ucmd->u.raw.sense_len != 0)
1680 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1681 }
1682 gdt_free_ccb(gdt, gccb);
1683 if (!(gdt->sc_state & GDT_POLLING))
1684 splx(lock);
1685 /* wakeup */
1686 wakeup(ucmd);
1687 }
1688 gdt_next(gdt);
1689 break;
1690
1691 default:
1692 gdt_free_ccb(gdt, gccb);
1693 gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1694 if (!(gdt->sc_state & GDT_POLLING))
1695 splx(lock);
1696 gdt_next(gdt);
1697 break;
1698 }
1699}
1700
1685static int
1701int
1686gdt_async_event(struct gdt_softc *gdt, int service)
1687{
1688 struct gdt_ccb *gccb;
1689
1690 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1691
1692 if (service == GDT_SCREENSERVICE) {
1693 if (gdt->sc_status == GDT_MSG_REQUEST) {
1694 while (gdt->sc_test_busy(gdt))
1695 DELAY(1);
1696 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1697 gccb = gdt_get_ccb(gdt);
1698 if (gccb == NULL) {
1699 printf("iir%d: No free command index found\n",
1700 gdt->sc_hanum);
1701 return (1);
1702 }
1703 gccb->gc_service = service;
1704 gccb->gc_flags = GDT_GCF_SCREEN;
1705 gdt->sc_set_sema0(gdt);
1706 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1707 gccb->gc_cmd_index);
1708 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1709 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1710 GDT_MSG_INV_HANDLE);
1711 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1712 gdt_ccb_vtop(gdt, gccb) +
1713 offsetof(struct gdt_ccb, gc_scratch[0]));
1714 gdt->sc_cmd_off = 0;
1715 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1716 sizeof(u_int32_t));
1717 gdt->sc_cmd_cnt = 0;
1718 gdt->sc_copy_cmd(gdt, gccb);
1719 printf("iir%d: [PCI %d/%d] ",
1720 gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
1721 gdt->sc_release_event(gdt);
1722 }
1723
1724 } else {
1725 if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1726 gdt->sc_dvr.size = 0;
1727 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1728 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1729 /* severity and event_string already set! */
1730 } else {
1731 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1732 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1733 gdt->sc_dvr.eu.async.service = service;
1734 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1735 gdt->sc_dvr.eu.async.info = gdt->sc_info;
1736 *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
1737 }
1738 gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1739 printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1740 }
1741
1742 return (0);
1743}
1744
1702gdt_async_event(struct gdt_softc *gdt, int service)
1703{
1704 struct gdt_ccb *gccb;
1705
1706 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1707
1708 if (service == GDT_SCREENSERVICE) {
1709 if (gdt->sc_status == GDT_MSG_REQUEST) {
1710 while (gdt->sc_test_busy(gdt))
1711 DELAY(1);
1712 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1713 gccb = gdt_get_ccb(gdt);
1714 if (gccb == NULL) {
1715 printf("iir%d: No free command index found\n",
1716 gdt->sc_hanum);
1717 return (1);
1718 }
1719 gccb->gc_service = service;
1720 gccb->gc_flags = GDT_GCF_SCREEN;
1721 gdt->sc_set_sema0(gdt);
1722 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1723 gccb->gc_cmd_index);
1724 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1725 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1726 GDT_MSG_INV_HANDLE);
1727 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1728 gdt_ccb_vtop(gdt, gccb) +
1729 offsetof(struct gdt_ccb, gc_scratch[0]));
1730 gdt->sc_cmd_off = 0;
1731 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1732 sizeof(u_int32_t));
1733 gdt->sc_cmd_cnt = 0;
1734 gdt->sc_copy_cmd(gdt, gccb);
1735 printf("iir%d: [PCI %d/%d] ",
1736 gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
1737 gdt->sc_release_event(gdt);
1738 }
1739
1740 } else {
1741 if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1742 gdt->sc_dvr.size = 0;
1743 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1744 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1745 /* severity and event_string already set! */
1746 } else {
1747 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1748 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1749 gdt->sc_dvr.eu.async.service = service;
1750 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1751 gdt->sc_dvr.eu.async.info = gdt->sc_info;
1752 *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
1753 }
1754 gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1755 printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1756 }
1757
1758 return (0);
1759}
1760
1745static int
1761int
1746gdt_sync_event(struct gdt_softc *gdt, int service,
1747 u_int8_t index, struct gdt_ccb *gccb)
1748{
1749 union ccb *ccb;
1762gdt_sync_event(struct gdt_softc *gdt, int service,
1763 u_int8_t index, struct gdt_ccb *gccb)
1764{
1765 union ccb *ccb;
1750 int op;
1751
1752 GDT_DPRINTF(GDT_D_INTR,
1753 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1754
1755 ccb = gccb->gc_ccb;
1756
1757 if (service == GDT_SCREENSERVICE) {
1758 u_int32_t msg_len;
1759
1760 msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1761 if (msg_len)
1762 if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1763 gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1764 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1765 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1766 }
1767
1768 if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1769 !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1770 while (gdt->sc_test_busy(gdt))
1771 DELAY(1);
1772 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1773 gccb = gdt_get_ccb(gdt);
1774 if (gccb == NULL) {
1775 printf("iir%d: No free command index found\n",
1776 gdt->sc_hanum);
1777 return (1);
1778 }
1779 gccb->gc_service = service;
1780 gccb->gc_flags = GDT_GCF_SCREEN;
1781 gdt->sc_set_sema0(gdt);
1782 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1783 gccb->gc_cmd_index);
1784 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1785 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1786 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1787 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1788 gdt_ccb_vtop(gdt, gccb) +
1789 offsetof(struct gdt_ccb, gc_scratch[0]));
1790 gdt->sc_cmd_off = 0;
1791 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1792 sizeof(u_int32_t));
1793 gdt->sc_cmd_cnt = 0;
1794 gdt->sc_copy_cmd(gdt, gccb);
1795 gdt->sc_release_event(gdt);
1796 return (0);
1797 }
1798
1799 if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1800 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1801 /* default answers (getchar() not possible) */
1802 if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1803 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1804 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1805 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1806 } else {
1807 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1808 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1809 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1810 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1811 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1812 }
1813 gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1814 gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1815 while (gdt->sc_test_busy(gdt))
1816 DELAY(1);
1817 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1818 gccb = gdt_get_ccb(gdt);
1819 if (gccb == NULL) {
1820 printf("iir%d: No free command index found\n",
1821 gdt->sc_hanum);
1822 return (1);
1823 }
1824 gccb->gc_service = service;
1825 gccb->gc_flags = GDT_GCF_SCREEN;
1826 gdt->sc_set_sema0(gdt);
1827 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1828 gccb->gc_cmd_index);
1829 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1830 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1831 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1832 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1833 gdt_ccb_vtop(gdt, gccb) +
1834 offsetof(struct gdt_ccb, gc_scratch[0]));
1835 gdt->sc_cmd_off = 0;
1836 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1837 sizeof(u_int32_t));
1838 gdt->sc_cmd_cnt = 0;
1839 gdt->sc_copy_cmd(gdt, gccb);
1840 gdt->sc_release_event(gdt);
1841 return (0);
1842 }
1843 printf("\n");
1844 return (0);
1845 } else {
1846 untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
1847 if (gdt->sc_status == GDT_S_BSY) {
1848 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1849 gdt, gccb));
1850 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1851 ++gdt_stat.req_queue_act;
1852 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1853 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1854 return (2);
1855 }
1856
1766
1767 GDT_DPRINTF(GDT_D_INTR,
1768 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1769
1770 ccb = gccb->gc_ccb;
1771
1772 if (service == GDT_SCREENSERVICE) {
1773 u_int32_t msg_len;
1774
1775 msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1776 if (msg_len)
1777 if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1778 gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1779 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1780 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1781 }
1782
1783 if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1784 !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1785 while (gdt->sc_test_busy(gdt))
1786 DELAY(1);
1787 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1788 gccb = gdt_get_ccb(gdt);
1789 if (gccb == NULL) {
1790 printf("iir%d: No free command index found\n",
1791 gdt->sc_hanum);
1792 return (1);
1793 }
1794 gccb->gc_service = service;
1795 gccb->gc_flags = GDT_GCF_SCREEN;
1796 gdt->sc_set_sema0(gdt);
1797 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1798 gccb->gc_cmd_index);
1799 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1800 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1801 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1802 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1803 gdt_ccb_vtop(gdt, gccb) +
1804 offsetof(struct gdt_ccb, gc_scratch[0]));
1805 gdt->sc_cmd_off = 0;
1806 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1807 sizeof(u_int32_t));
1808 gdt->sc_cmd_cnt = 0;
1809 gdt->sc_copy_cmd(gdt, gccb);
1810 gdt->sc_release_event(gdt);
1811 return (0);
1812 }
1813
1814 if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1815 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1816 /* default answers (getchar() not possible) */
1817 if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1818 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1819 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1820 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1821 } else {
1822 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1823 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1824 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1825 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1826 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1827 }
1828 gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1829 gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1830 while (gdt->sc_test_busy(gdt))
1831 DELAY(1);
1832 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1833 gccb = gdt_get_ccb(gdt);
1834 if (gccb == NULL) {
1835 printf("iir%d: No free command index found\n",
1836 gdt->sc_hanum);
1837 return (1);
1838 }
1839 gccb->gc_service = service;
1840 gccb->gc_flags = GDT_GCF_SCREEN;
1841 gdt->sc_set_sema0(gdt);
1842 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1843 gccb->gc_cmd_index);
1844 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1845 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1846 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1847 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1848 gdt_ccb_vtop(gdt, gccb) +
1849 offsetof(struct gdt_ccb, gc_scratch[0]));
1850 gdt->sc_cmd_off = 0;
1851 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1852 sizeof(u_int32_t));
1853 gdt->sc_cmd_cnt = 0;
1854 gdt->sc_copy_cmd(gdt, gccb);
1855 gdt->sc_release_event(gdt);
1856 return (0);
1857 }
1858 printf("\n");
1859 return (0);
1860 } else {
1861 untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
1862 if (gdt->sc_status == GDT_S_BSY) {
1863 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1864 gdt, gccb));
1865 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1866 ++gdt_stat.req_queue_act;
1867 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1868 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1869 return (2);
1870 }
1871
1857 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1858 op = BUS_DMASYNC_POSTREAD;
1859 else
1860 op = BUS_DMASYNC_POSTWRITE;
1861 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1872 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1873 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1874 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1875
1876 ccb->csio.resid = 0;
1877 if (gdt->sc_status == GDT_S_OK) {
1878 ccb->ccb_h.status = CAM_REQ_CMP;
1879 } else {
1880 /* error */
1881 if (gccb->gc_service == GDT_CACHESERVICE) {
1882 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1883 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1884 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1885 ccb->csio.sense_data.error_code =
1886 SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
1887 ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
1888
1889 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1890 gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
1891 gdt->sc_dvr.eu.sync.service = service;
1892 gdt->sc_dvr.eu.sync.status = gdt->sc_status;
1893 gdt->sc_dvr.eu.sync.info = gdt->sc_info;
1894 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1895 if (gdt->sc_status >= 0x8000)
1896 gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1897 else
1898 gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1899 } else {
1900 /* raw service */
1901 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1889 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1902 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1903 } else {
1904 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1905 ccb->csio.scsi_status = gdt->sc_info;
1906 bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1907 ccb->csio.sense_len);
1908 }
1909 }
1910 }
1911 --gdt_stat.io_count_act;
1912 xpt_done(ccb);
1913 }
1914 return (0);
1915}
1916
1917/* Controller event handling functions */
1918gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
1919 gdt_evt_data *evt)
1920{
1921 gdt_evt_str *e;
1922 struct timeval tv;
1923
1924 GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1925 if (source == 0) /* no source -> no event */
1926 return 0;
1927
1928 if (ebuffer[elastidx].event_source == source &&
1929 ebuffer[elastidx].event_idx == idx &&
1930 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1931 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1932 (char *)&evt->eu, evt->size)) ||
1933 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1934 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1935 (char *)&evt->event_string)))) {
1936 e = &ebuffer[elastidx];
1937 getmicrotime(&tv);
1938 e->last_stamp = tv.tv_sec;
1939 ++e->same_count;
1940 } else {
1941 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
1942 ++elastidx;
1943 if (elastidx == GDT_MAX_EVENTS)
1944 elastidx = 0;
1945 if (elastidx == eoldidx) { /* reached mark ? */
1946 ++eoldidx;
1947 if (eoldidx == GDT_MAX_EVENTS)
1948 eoldidx = 0;
1949 }
1950 }
1951 e = &ebuffer[elastidx];
1952 e->event_source = source;
1953 e->event_idx = idx;
1954 getmicrotime(&tv);
1955 e->first_stamp = e->last_stamp = tv.tv_sec;
1956 e->same_count = 1;
1957 e->event_data = *evt;
1958 e->application = 0;
1959 }
1960 return e;
1961}
1962
1963int gdt_read_event(int handle, gdt_evt_str *estr)
1964{
1965 gdt_evt_str *e;
1966 int eindex, lock;
1967
1968 GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1969 lock = splcam();
1970 if (handle == -1)
1971 eindex = eoldidx;
1972 else
1973 eindex = handle;
1974 estr->event_source = 0;
1975
1976 if (eindex >= GDT_MAX_EVENTS) {
1977 splx(lock);
1978 return eindex;
1979 }
1980 e = &ebuffer[eindex];
1981 if (e->event_source != 0) {
1982 if (eindex != elastidx) {
1983 if (++eindex == GDT_MAX_EVENTS)
1984 eindex = 0;
1985 } else {
1986 eindex = -1;
1987 }
1988 memcpy(estr, e, sizeof(gdt_evt_str));
1989 }
1990 splx(lock);
1991 return eindex;
1992}
1993
1994void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1995{
1996 gdt_evt_str *e;
1997 int found = FALSE;
1998 int eindex, lock;
1999
2000 GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
2001 lock = splcam();
2002 eindex = eoldidx;
2003 for (;;) {
2004 e = &ebuffer[eindex];
2005 if (e->event_source == 0)
2006 break;
2007 if ((e->application & application) == 0) {
2008 e->application |= application;
2009 found = TRUE;
2010 break;
2011 }
2012 if (eindex == elastidx)
2013 break;
2014 if (++eindex == GDT_MAX_EVENTS)
2015 eindex = 0;
2016 }
2017 if (found)
2018 memcpy(estr, e, sizeof(gdt_evt_str));
2019 else
2020 estr->event_source = 0;
2021 splx(lock);
2022}
2023
2024void gdt_clear_events()
2025{
2026 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2027
2028 eoldidx = elastidx = 0;
2029 ebuffer[0].event_source = 0;
2030}
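
The controller event functions above (gdt_store_event(), gdt_read_event(), gdt_readapp_event(), gdt_clear_events()) keep events in a fixed-size circular buffer: a repeat of the most recent event is coalesced by bumping same_count, the oldest entry is overwritten once the buffer wraps, and the per-application bit mask keeps one consumer from being handed the same entry twice. The user-space sketch below illustrates only the store/read ring logic under those assumptions; the names (struct evt, ring, RING_MAX, evt_store(), evt_read()) are illustrative and are not part of the iir driver or of any kernel API, and the driver's splcam() locking and application masks are omitted.

/*
 * Illustrative user-space sketch (not part of iir.c): a minimal circular
 * event buffer with the same coalesce/overwrite behaviour as the driver's
 * gdt_store_event()/gdt_read_event().  All names here are hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>

#define RING_MAX 8                      /* stands in for GDT_MAX_EVENTS */

struct evt {
    int         source;                 /* 0 marks a free slot */
    int         idx;
    time_t      first_stamp;
    time_t      last_stamp;
    unsigned    same_count;
    char        text[32];
};

static struct evt ring[RING_MAX];
static int oldest, last;

/* Store an event; a repeat of the newest entry is coalesced, not re-added. */
static struct evt *
evt_store(int source, int idx, const char *text)
{
    struct evt *e = &ring[last];

    if (source == 0)                    /* no source -> no event */
        return (NULL);
    if (e->source == source && e->idx == idx && strcmp(e->text, text) == 0) {
        e->last_stamp = time(NULL);
        e->same_count++;
        return (e);
    }
    if (e->source != 0) {               /* newest slot in use -> advance */
        if (++last == RING_MAX)
            last = 0;
        if (last == oldest && ++oldest == RING_MAX)
            oldest = 0;                 /* buffer full: drop the oldest */
    }
    e = &ring[last];
    e->source = source;
    e->idx = idx;
    e->first_stamp = e->last_stamp = time(NULL);
    e->same_count = 1;
    strncpy(e->text, text, sizeof(e->text) - 1);
    e->text[sizeof(e->text) - 1] = '\0';
    return (e);
}

/*
 * Read one event into *out; handle == -1 starts at the oldest entry.
 * Returns the handle of the next entry, or -1 after the newest one.
 */
static int
evt_read(int handle, struct evt *out)
{
    int i = (handle == -1) ? oldest : handle;

    out->source = 0;
    if (i < 0 || i >= RING_MAX || ring[i].source == 0)
        return (i);
    *out = ring[i];
    if (i == last)
        return (-1);
    return (i + 1 == RING_MAX ? 0 : i + 1);
}

int
main(void)
{
    struct evt e;
    int h = -1;

    evt_store(1, 0, "sync error, service 3");
    evt_store(1, 0, "sync error, service 3");   /* coalesced: same_count == 2 */
    evt_store(2, 1, "driver event");

    do {
        h = evt_read(h, &e);
        if (e.source != 0)
            printf("src %d idx %d x%u: %s\n", e.source, e.idx,
                e.same_count, e.text);
    } while (h != -1 && e.source != 0);
    return (0);
}

Coalescing repeated events is what keeps a burst of identical controller errors from pushing older, distinct entries out of the fixed-size buffer.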