iir.c (119418) iir.c (120477)
1/*
1/*
2 * Copyright (c) 2000-03 Intel Corporation
2 * Copyright (c) 2000-03 ICP vortex GmbH
3 * Copyright (c) 2002-03 Intel Corporation
4 * Copyright (c) 2003 Adaptec Inc.
3 * All Rights Reserved
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
 31 * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
32 *
33 * Written by: Achim Leubner <achim.leubner@intel.com>
35 * Written by: Achim Leubner <achim_leubner@adaptec.com>
34 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
35 *
36 * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
37 * Mike Smith; Some driver source code.
38 * FreeBSD.ORG; Great O/S to work on and for.
39 *
40 * TODO:
42 * $Id: iir.c 1.4 2003/08/26 12:29:44 achim Exp $"
41 */
42
43 */
44
43#ident "$Id: iir.c 1.3 2003/03/21 16:28:32 achim Exp $"
44#include <sys/cdefs.h>
45#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/dev/iir/iir.c 119418 2003-08-24 17:55:58Z obrien $");
46__FBSDID("$FreeBSD: head/sys/dev/iir/iir.c 120477 2003-09-26 15:36:47Z scottl $");
46
47#define _IIR_C_
48
49/* #include "opt_iir.h" */
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/endian.h>
53#include <sys/eventhandler.h>
54#include <sys/malloc.h>
55#include <sys/kernel.h>
56#include <sys/bus.h>
57
58#include <machine/bus_memio.h>
59#include <machine/bus_pio.h>
60#include <machine/bus.h>
61#include <machine/clock.h>
62#include <machine/stdarg.h>
63
64#include <cam/cam.h>
65#include <cam/cam_ccb.h>
66#include <cam/cam_sim.h>
67#include <cam/cam_xpt_sim.h>
68#include <cam/cam_debug.h>
69#include <cam/scsi/scsi_all.h>
70#include <cam/scsi/scsi_message.h>
71
72#include <vm/vm.h>
73#include <vm/pmap.h>
74
75#include <dev/iir/iir.h>
76
77struct gdt_softc *gdt_wait_gdt;
78int gdt_wait_index;
79
80#ifdef GDT_DEBUG
81int gdt_debug = GDT_DEBUG;
82#ifdef __SERIAL__
83#define MAX_SERBUF 160
84static void ser_init(void);
85static void ser_puts(char *str);
86static void ser_putc(int c);
87static char strbuf[MAX_SERBUF+1];
88#ifdef __COM2__
89#define COM_BASE 0x2f8
90#else
91#define COM_BASE 0x3f8
92#endif
93static void ser_init()
94{
95 unsigned port=COM_BASE;
96
97 outb(port+3, 0x80);
98 outb(port+1, 0);
99 /* 19200 Baud, if 9600: outb(12,port) */
100 outb(port, 6);
101 outb(port+3, 3);
102 outb(port+1, 0);
103}
104
105static void ser_puts(char *str)
106{
107 char *ptr;
108
109 ser_init();
110 for (ptr=str;*ptr;++ptr)
111 ser_putc((int)(*ptr));
112}
113
114static void ser_putc(int c)
115{
116 unsigned port=COM_BASE;
117
118 while ((inb(port+5) & 0x20)==0);
119 outb(port, c);
120 if (c==0x0a)
121 {
122 while ((inb(port+5) & 0x20)==0);
123 outb(port, 0x0d);
124 }
125}
126
127int ser_printf(const char *fmt, ...)
128{
129 va_list args;
130 int i;
131
132 va_start(args,fmt);
133 i = vsprintf(strbuf,fmt,args);
134 ser_puts(strbuf);
135 va_end(args);
136 return i;
137}
138#endif
139#endif
140
141/* The linked list of softc structures */
142struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
143/* controller cnt. */
144int gdt_cnt = 0;
145/* event buffer */
146static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
147static int elastidx, eoldidx;
148/* statistics */
149gdt_statist_t gdt_stat;
150
151/* Definitions for our use of the SIM private CCB area */
152#define ccb_sim_ptr spriv_ptr0
153#define ccb_priority spriv_field1
154
155static void iir_action(struct cam_sim *sim, union ccb *ccb);
156static void iir_poll(struct cam_sim *sim);
157static void iir_shutdown(void *arg, int howto);
158static void iir_timeout(void *arg);
159static void iir_watchdog(void *arg);
160
161static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
162 int *secs);
163static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
164 u_int8_t service, u_int16_t opcode,
165 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
166static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
167 int timeout);
168
169static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
170static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
171 struct gdt_ccb *gccb);
172
173static int gdt_sync_event(struct gdt_softc *gdt, int service,
174 u_int8_t index, struct gdt_ccb *gccb);
175static int gdt_async_event(struct gdt_softc *gdt, int service);
176static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
177 union ccb *ccb, int *lock);
178static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
179 union ccb *ccb, int *lock);
180static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
181 gdt_ucmd_t *ucmd, int *lock);
182static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
183
184static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
185 int nseg, int error);
186static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
187 int nseg, int error);
188
189int
190iir_init(struct gdt_softc *gdt)
191{
192 u_int16_t cdev_cnt;
193 int i, id, drv_cyls, drv_hds, drv_secs;
194 struct gdt_ccb *gccb;
195
196 GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
197
198 gdt->sc_state = GDT_POLLING;
199 gdt_clear_events();
200 bzero(&gdt_stat, sizeof(gdt_statist_t));
201
202 SLIST_INIT(&gdt->sc_free_gccb);
203 SLIST_INIT(&gdt->sc_pending_gccb);
204 TAILQ_INIT(&gdt->sc_ccb_queue);
205 TAILQ_INIT(&gdt->sc_ucmd_queue);
206 TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
207
208 /* DMA tag for mapping buffers into device visible space. */
209 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
210 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
211 /*highaddr*/BUS_SPACE_MAXADDR,
212 /*filter*/NULL, /*filterarg*/NULL,
213 /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
214 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
215 /*flags*/BUS_DMA_ALLOCNOW,
216 /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
217 &gdt->sc_buffer_dmat) != 0) {
218 printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
219 gdt->sc_hanum);
220 return (1);
221 }
222 gdt->sc_init_level++;
223
224 /* DMA tag for our ccb structures */
225 if (bus_dma_tag_create(gdt->sc_parent_dmat,
226 /*alignment*/1,
227 /*boundary*/0,
228 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
229 /*highaddr*/BUS_SPACE_MAXADDR,
230 /*filter*/NULL,
231 /*filterarg*/NULL,
232 GDT_MAXCMDS * sizeof(struct gdt_ccb), /* maxsize */
233 /*nsegments*/1,
234 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
235 /*flags*/0, /*lockfunc*/busdma_lock_mutex,
236 /*lockarg*/&Giant, &gdt->sc_gccb_dmat) != 0) {
237 printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
238 gdt->sc_hanum);
239 return (1);
240 }
241 gdt->sc_init_level++;
242
243 /* Allocation for our ccbs */
244 if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
245 BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
246 printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
247 gdt->sc_hanum);
248 return (1);
249 }
250 gdt->sc_init_level++;
251
252 /* And permanently map them */
253 bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
254 gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
255 gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
256 gdt->sc_init_level++;
257
258 /* Clear them out. */
259 bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
260
261 /* Initialize the ccbs */
262 for (i = GDT_MAXCMDS-1; i >= 0; i--) {
263 gdt->sc_gccbs[i].gc_cmd_index = i + 2;
264 gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
265 gdt->sc_gccbs[i].gc_map_flag = FALSE;
266 if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
267 &gdt->sc_gccbs[i].gc_dmamap) != 0)
268 return(1);
269 gdt->sc_gccbs[i].gc_map_flag = TRUE;
270 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
271 }
272 gdt->sc_init_level++;
273
274 /* create the control device */
275 gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
276
277 /* allocate ccb for gdt_internal_cmd() */
278 gccb = gdt_get_ccb(gdt);
279 if (gccb == NULL) {
280 printf("iir%d: No free command index found\n",
281 gdt->sc_hanum);
282 return (1);
283 }
284
285 if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
286 0, 0, 0)) {
287 printf("iir%d: Screen service initialization error %d\n",
288 gdt->sc_hanum, gdt->sc_status);
289 gdt_free_ccb(gdt, gccb);
290 return (1);
291 }
292
293 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
294 GDT_LINUX_OS, 0, 0)) {
295 printf("iir%d: Cache service initialization error %d\n",
296 gdt->sc_hanum, gdt->sc_status);
297 gdt_free_ccb(gdt, gccb);
298 return (1);
299 }
300 gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
301 0, 0, 0);
302
303 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
304 0xffff, 1, 0)) {
305 printf("iir%d: Cache service mount error %d\n",
306 gdt->sc_hanum, gdt->sc_status);
307 gdt_free_ccb(gdt, gccb);
308 return (1);
309 }
310
311 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
312 GDT_LINUX_OS, 0, 0)) {
313 printf("iir%d: Cache service post-mount initialization error %d\n",
314 gdt->sc_hanum, gdt->sc_status);
315 gdt_free_ccb(gdt, gccb);
316 return (1);
317 }
318 cdev_cnt = (u_int16_t)gdt->sc_info;
319 gdt->sc_fw_vers = gdt->sc_service;
320
321 /* Detect number of buses */
322 gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
323 gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
324 gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
325 gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
326 gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
327 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
328 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
329 GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
330 gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
331 for (i = 0; i < gdt->sc_bus_cnt; i++) {
332 id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
333 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
334 gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
335 }
336 } else {
337 /* New method failed, use fallback. */
338 for (i = 0; i < GDT_MAXBUS; i++) {
339 gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
340 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
341 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
342 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
343 GDT_GETCH_SZ)) {
344 if (i == 0) {
345 printf("iir%d: Cannot get channel count, "
346 "error %d\n", gdt->sc_hanum, gdt->sc_status);
347 gdt_free_ccb(gdt, gccb);
348 return (1);
349 }
350 break;
351 }
352 gdt->sc_bus_id[i] =
353 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
354 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
355 }
356 gdt->sc_bus_cnt = i;
357 }
358 /* add one "virtual" channel for the host drives */
359 gdt->sc_virt_bus = gdt->sc_bus_cnt;
360 gdt->sc_bus_cnt++;
361
362 if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
363 0, 0, 0)) {
364 printf("iir%d: Raw service initialization error %d\n",
365 gdt->sc_hanum, gdt->sc_status);
366 gdt_free_ccb(gdt, gccb);
367 return (1);
368 }
369
370 /* Set/get features raw service (scatter/gather) */
371 gdt->sc_raw_feat = 0;
372 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
373 GDT_SCATTER_GATHER, 0, 0)) {
374 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
375 0, 0, 0)) {
376 gdt->sc_raw_feat = gdt->sc_info;
377 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
378 panic("iir%d: Scatter/Gather Raw Service "
379 "required but not supported!\n", gdt->sc_hanum);
380 gdt_free_ccb(gdt, gccb);
381 return (1);
382 }
383 }
384 }
385
386 /* Set/get features cache service (scatter/gather) */
387 gdt->sc_cache_feat = 0;
388 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
389 0, GDT_SCATTER_GATHER, 0)) {
390 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
391 0, 0, 0)) {
392 gdt->sc_cache_feat = gdt->sc_info;
393 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
394 panic("iir%d: Scatter/Gather Cache Service "
395 "required but not supported!\n", gdt->sc_hanum);
396 gdt_free_ccb(gdt, gccb);
397 return (1);
398 }
399 }
400 }
401
402 /* OEM */
403 gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
404 gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
405 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
406 GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
407 sizeof(gdt_oem_str_record_t))) {
408 strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
409 gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
410 gdt->oem_name[7]='\0';
411 } else {
412 /* Old method, based on PCI ID */
413 if (gdt->sc_vendor == INTEL_VENDOR_ID)
414 strcpy(gdt->oem_name,"Intel ");
415 else
416 strcpy(gdt->oem_name,"ICP ");
417 }
418
419 /* Scan for cache devices */
420 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
421 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
422 i, 0, 0)) {
423 gdt->sc_hdr[i].hd_present = 1;
424 gdt->sc_hdr[i].hd_size = gdt->sc_info;
425
426 /*
427 * Evaluate mapping (sectors per head, heads per cyl)
428 */
429 gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
430 if (gdt->sc_info2 == 0)
431 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
432 &drv_cyls, &drv_hds, &drv_secs);
433 else {
434 drv_hds = gdt->sc_info2 & 0xff;
435 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
436 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
437 drv_secs;
438 }
439 gdt->sc_hdr[i].hd_heads = drv_hds;
440 gdt->sc_hdr[i].hd_secs = drv_secs;
441 /* Round the size */
442 gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
443
444 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
445 GDT_DEVTYPE, i, 0, 0))
446 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
447 }
448 }
449
450 GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
451 gdt->sc_dpmembase,
452 gdt->sc_bus_cnt, cdev_cnt,
453 cdev_cnt == 1 ? "" : "s"));
454 gdt_free_ccb(gdt, gccb);
455
456 gdt_cnt++;
457 return (0);
458}
459
460void
461iir_free(struct gdt_softc *gdt)
462{
463 int i;
464
465 GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
466
467 switch (gdt->sc_init_level) {
468 default:
469 gdt_destroy_dev(gdt->sc_dev);
470 case 5:
471 for (i = GDT_MAXCMDS-1; i >= 0; i--)
472 if (gdt->sc_gccbs[i].gc_map_flag)
473 bus_dmamap_destroy(gdt->sc_buffer_dmat,
474 gdt->sc_gccbs[i].gc_dmamap);
475 bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
476 case 4:
477 bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
478 case 3:
479 bus_dma_tag_destroy(gdt->sc_gccb_dmat);
480 case 2:
481 bus_dma_tag_destroy(gdt->sc_buffer_dmat);
482 case 1:
483 bus_dma_tag_destroy(gdt->sc_parent_dmat);
484 case 0:
485 break;
486 }
487 TAILQ_REMOVE(&gdt_softcs, gdt, links);
488}
489
490void
491iir_attach(struct gdt_softc *gdt)
492{
493 struct cam_devq *devq;
494 int i;
495
496 GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
497
498 /*
499 * Create the device queue for our SIM.
500 */
501 devq = cam_simq_alloc(GDT_MAXCMDS);
502 if (devq == NULL)
503 return;
504
505 for (i = 0; i < gdt->sc_bus_cnt; i++) {
506 /*
507 * Construct our SIM entry
508 */
509 gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
510 gdt, gdt->sc_hanum, /*untagged*/2,
511 /*tagged*/GDT_MAXCMDS, devq);
512 if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
513 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
514 break;
515 }
516
517 if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
518 cam_sim_path(gdt->sims[i]),
519 CAM_TARGET_WILDCARD,
520 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
521 xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
522 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
523 break;
524 }
525 }
526 if (i > 0)
527 EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
528 gdt, SHUTDOWN_PRI_DEFAULT);
529 /* iir_watchdog(gdt); */
530 gdt->sc_state = GDT_NORMAL;
531}
532
533static void
534gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
535{
536 *cyls = size / GDT_HEADS / GDT_SECS;
537 if (*cyls < GDT_MAXCYLS) {
538 *heads = GDT_HEADS;
539 *secs = GDT_SECS;
540 } else {
541 /* Too high for 64 * 32 */
542 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
543 if (*cyls < GDT_MAXCYLS) {
544 *heads = GDT_MEDHEADS;
545 *secs = GDT_MEDSECS;
546 } else {
547 /* Too high for 127 * 63 */
548 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
549 *heads = GDT_BIGHEADS;
550 *secs = GDT_BIGSECS;
551 }
552 }
553}
554
555static int
556gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
557 int timeout)
558{
559 int rv = 0;
560
561 GDT_DPRINTF(GDT_D_INIT,
562 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
563
564 gdt->sc_state |= GDT_POLL_WAIT;
565 do {
566 iir_intr(gdt);
567 if (gdt == gdt_wait_gdt &&
568 gccb->gc_cmd_index == gdt_wait_index) {
569 rv = 1;
570 break;
571 }
572 DELAY(1);
573 } while (--timeout);
574 gdt->sc_state &= ~GDT_POLL_WAIT;
575
576 while (gdt->sc_test_busy(gdt))
577 DELAY(1); /* XXX correct? */
578
579 return (rv);
580}
581
582static int
583gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
584 u_int8_t service, u_int16_t opcode,
585 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
586{
587 int retries;
588
589 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
590 gdt, service, opcode, arg1, arg2, arg3));
591
592 bzero(gdt->sc_cmd, GDT_CMD_SZ);
593
594 for (retries = GDT_RETRIES; ; ) {
595 gccb->gc_service = service;
596 gccb->gc_flags = GDT_GCF_INTERNAL;
597
598 gdt->sc_set_sema0(gdt);
599 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
600 gccb->gc_cmd_index);
601 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
602
603 switch (service) {
604 case GDT_CACHESERVICE:
605 if (opcode == GDT_IOCTL) {
606 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
607 GDT_IOCTL_SUBFUNC, arg1);
608 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
609 GDT_IOCTL_CHANNEL, arg2);
610 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
611 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
612 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
613 gdt_ccb_vtop(gdt, gccb) +
614 offsetof(struct gdt_ccb, gc_scratch[0]));
615 } else {
616 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
617 GDT_CACHE_DEVICENO, (u_int16_t)arg1);
618 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
619 GDT_CACHE_BLOCKNO, arg2);
620 }
621 break;
622
623 case GDT_SCSIRAWSERVICE:
624 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
625 GDT_RAW_DIRECTION, arg1);
626 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
627 (u_int8_t)arg2;
628 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
629 (u_int8_t)arg3;
630 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
631 (u_int8_t)(arg3 >> 8);
632 }
633
634 gdt->sc_cmd_len = GDT_CMD_SZ;
635 gdt->sc_cmd_off = 0;
636 gdt->sc_cmd_cnt = 0;
637 gdt->sc_copy_cmd(gdt, gccb);
638 gdt->sc_release_event(gdt);
639 DELAY(20);
640 if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
641 return (0);
642 if (gdt->sc_status != GDT_S_BSY || --retries == 0)
643 break;
644 DELAY(1);
645 }
646 return (gdt->sc_status == GDT_S_OK);
647}
648
649static struct gdt_ccb *
650gdt_get_ccb(struct gdt_softc *gdt)
651{
652 struct gdt_ccb *gccb;
653 int lock;
654
655 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
656
657 lock = splcam();
658 gccb = SLIST_FIRST(&gdt->sc_free_gccb);
659 if (gccb != NULL) {
660 SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
661 SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
662 ++gdt_stat.cmd_index_act;
663 if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
664 gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
665 }
666 splx(lock);
667 return (gccb);
668}
669
670void
671gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
672{
673 int lock;
674
675 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
676
677 lock = splcam();
678 gccb->gc_flags = GDT_GCF_UNUSED;
679 SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
680 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
681 --gdt_stat.cmd_index_act;
682 splx(lock);
683 if (gdt->sc_state & GDT_SHUTDOWN)
684 wakeup(gccb);
685}
686
687static u_int32_t
688gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
689{
690 return (gdt->sc_gccb_busbase
691 + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
692}
693
694void
695gdt_next(struct gdt_softc *gdt)
696{
697 int lock;
698 union ccb *ccb;
699 gdt_ucmd_t *ucmd;
700 struct cam_sim *sim;
701 int bus, target, lun;
702 int next_cmd;
703
704 struct ccb_scsiio *csio;
705 struct ccb_hdr *ccbh;
706 struct gdt_ccb *gccb = NULL;
707 u_int8_t cmd;
708
709 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
710
711 lock = splcam();
712 if (gdt->sc_test_busy(gdt)) {
713 if (!(gdt->sc_state & GDT_POLLING)) {
714 splx(lock);
715 return;
716 }
717 while (gdt->sc_test_busy(gdt))
718 DELAY(1);
719 }
720
721 gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
722 next_cmd = TRUE;
723 for (;;) {
724 /* I/Os in queue? controller ready? */
725 if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
726 !TAILQ_FIRST(&gdt->sc_ccb_queue))
727 break;
728
729 /* 1.: I/Os without ccb (IOCTLs) */
730 ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
731 if (ucmd != NULL) {
732 TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
733 if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
734 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
735 break;
736 }
737 break;
 738            /* if multiple commands were allowed: if (!gdt_polling) continue; */
739 }
740
741 /* 2.: I/Os with ccb */
742 ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
 743        /* always != NULL here, since it was tested above */
744 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
745 bus = cam_sim_bus(sim);
746 target = ccb->ccb_h.target_id;
747 lun = ccb->ccb_h.target_lun;
748
749 TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
750 --gdt_stat.req_queue_act;
751 /* ccb->ccb_h.func_code is XPT_SCSI_IO */
752 GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
753 ccb->ccb_h.flags));
754 csio = &ccb->csio;
755 ccbh = &ccb->ccb_h;
756 cmd = csio->cdb_io.cdb_bytes[0];
757 /* Max CDB length is 12 bytes */
758 if (csio->cdb_len > 12) {
759 ccbh->status = CAM_REQ_INVALID;
760 --gdt_stat.io_count_act;
761 xpt_done(ccb);
762 } else if (bus != gdt->sc_virt_bus) {
763 /* raw service command */
764 if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
765 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
766 sim_links.tqe);
767 ++gdt_stat.req_queue_act;
768 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
769 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
770 next_cmd = FALSE;
771 }
772 } else if (target >= GDT_MAX_HDRIVES ||
773 !gdt->sc_hdr[target].hd_present || lun != 0) {
774 ccbh->status = CAM_DEV_NOT_THERE;
775 --gdt_stat.io_count_act;
776 xpt_done(ccb);
777 } else {
778 /* cache service command */
779 if (cmd == READ_6 || cmd == WRITE_6 ||
780 cmd == READ_10 || cmd == WRITE_10) {
781 if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
782 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
783 sim_links.tqe);
784 ++gdt_stat.req_queue_act;
785 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
786 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
787 next_cmd = FALSE;
788 }
789 } else {
790 splx(lock);
791 gdt_internal_cache_cmd(gdt, ccb);
792 lock = splcam();
793 }
794 }
795 if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
796 break;
797 }
798 if (gdt->sc_cmd_cnt > 0)
799 gdt->sc_release_event(gdt);
800
801 splx(lock);
802
803 if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
804 gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
805 }
806}
807
808static struct gdt_ccb *
809gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
810{
811 struct gdt_ccb *gccb;
812 struct cam_sim *sim;
813
814 GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
815
816 if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
817 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
818 gdt->sc_ic_all_size) {
819 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
820 gdt->sc_hanum));
821 return (NULL);
822 }
823
824 bzero(gdt->sc_cmd, GDT_CMD_SZ);
825
826 gccb = gdt_get_ccb(gdt);
827 if (gccb == NULL) {
828 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
829 gdt->sc_hanum));
830 return (gccb);
831 }
832 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
833 gccb->gc_ccb = ccb;
834 gccb->gc_service = GDT_SCSIRAWSERVICE;
835 gccb->gc_flags = GDT_GCF_SCSI;
836
837 if (gdt->sc_cmd_cnt == 0)
838 gdt->sc_set_sema0(gdt);
839 splx(*lock);
840 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
841 gccb->gc_cmd_index);
842 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
843
844 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
845 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
846 GDT_DATA_IN : GDT_DATA_OUT);
847 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
848 ccb->csio.dxfer_len);
849 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
850 ccb->csio.cdb_len);
851 bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
852 ccb->csio.cdb_len);
853 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
854 ccb->ccb_h.target_id;
855 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
856 ccb->ccb_h.target_lun;
857 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
858 cam_sim_bus(sim);
859 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
860 sizeof(struct scsi_sense_data));
861 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
862 gdt_ccb_vtop(gdt, gccb) +
863 offsetof(struct gdt_ccb, gc_scratch[0]));
864
865 /*
866 * If we have any data to send with this command,
867 * map it into bus space.
868 */
869 /* Only use S/G if there is a transfer */
870 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
871 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
872 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
873 int s;
874 int error;
875
 876                /* unlock splcam() beforehand ??? */
877 s = splsoftvm();
878 error =
879 bus_dmamap_load(gdt->sc_buffer_dmat,
880 gccb->gc_dmamap,
881 ccb->csio.data_ptr,
882 ccb->csio.dxfer_len,
883 gdtexecuteccb,
884 gccb, /*flags*/0);
885 if (error == EINPROGRESS) {
886 xpt_freeze_simq(sim, 1);
887 gccb->gc_state |= CAM_RELEASE_SIMQ;
888 }
889 splx(s);
890 } else {
891 struct bus_dma_segment seg;
892
893 /* Pointer to physical buffer */
894 seg.ds_addr =
895 (bus_addr_t)ccb->csio.data_ptr;
896 seg.ds_len = ccb->csio.dxfer_len;
897 gdtexecuteccb(gccb, &seg, 1, 0);
898 }
899 } else {
900 struct bus_dma_segment *segs;
901
902 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
903 panic("iir%d: iir_action - Physical "
904 "segment pointers unsupported", gdt->sc_hanum);
905
906 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
907 panic("iir%d: iir_action - Virtual "
908 "segment addresses unsupported", gdt->sc_hanum);
909
910 /* Just use the segments provided */
911 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
912 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
913 }
914 } else {
915 gdtexecuteccb(gccb, NULL, 0, 0);
916 }
917
918 *lock = splcam();
919 return (gccb);
920}
921
922static struct gdt_ccb *
923gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
924{
925 struct gdt_ccb *gccb;
926 struct cam_sim *sim;
927 u_int8_t *cmdp;
928 u_int16_t opcode;
929 u_int32_t blockno, blockcnt;
930
931 GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
932
933 if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
934 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
935 gdt->sc_ic_all_size) {
936 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
937 gdt->sc_hanum));
938 return (NULL);
939 }
940
941 bzero(gdt->sc_cmd, GDT_CMD_SZ);
942
943 gccb = gdt_get_ccb(gdt);
944 if (gccb == NULL) {
945 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
946 gdt->sc_hanum));
947 return (gccb);
948 }
949 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
950 gccb->gc_ccb = ccb;
951 gccb->gc_service = GDT_CACHESERVICE;
952 gccb->gc_flags = GDT_GCF_SCSI;
953
954 if (gdt->sc_cmd_cnt == 0)
955 gdt->sc_set_sema0(gdt);
956 splx(*lock);
957 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
958 gccb->gc_cmd_index);
959 cmdp = ccb->csio.cdb_io.cdb_bytes;
960 opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
961 if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
962 opcode = GDT_WRITE_THR;
963 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
964
965 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
966 ccb->ccb_h.target_id);
967 if (ccb->csio.cdb_len == 6) {
968 struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
969 blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
970 blockcnt = rw->length ? rw->length : 0x100;
971 } else {
972 struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
973 blockno = scsi_4btoul(rw->addr);
974 blockcnt = scsi_2btoul(rw->length);
975 }
976 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
977 blockno);
978 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
979 blockcnt);
980
981 /*
982 * If we have any data to send with this command,
983 * map it into bus space.
984 */
985 /* Only use S/G if there is a transfer */
986 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
987 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
988 int s;
989 int error;
990
 991            /* unlock splcam() beforehand ??? */
992 s = splsoftvm();
993 error =
994 bus_dmamap_load(gdt->sc_buffer_dmat,
995 gccb->gc_dmamap,
996 ccb->csio.data_ptr,
997 ccb->csio.dxfer_len,
998 gdtexecuteccb,
999 gccb, /*flags*/0);
1000 if (error == EINPROGRESS) {
1001 xpt_freeze_simq(sim, 1);
1002 gccb->gc_state |= CAM_RELEASE_SIMQ;
1003 }
1004 splx(s);
1005 } else {
1006 struct bus_dma_segment seg;
1007
1008 /* Pointer to physical buffer */
1009 seg.ds_addr =
1010 (bus_addr_t)ccb->csio.data_ptr;
1011 seg.ds_len = ccb->csio.dxfer_len;
1012 gdtexecuteccb(gccb, &seg, 1, 0);
1013 }
1014 } else {
1015 struct bus_dma_segment *segs;
1016
1017 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
1018 panic("iir%d: iir_action - Physical "
1019 "segment pointers unsupported", gdt->sc_hanum);
1020
1021 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
1022 panic("iir%d: iir_action - Virtual "
1023 "segment addresses unsupported", gdt->sc_hanum);
1024
1025 /* Just use the segments provided */
1026 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1027 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
1028 }
1029
1030 *lock = splcam();
1031 return (gccb);
1032}
1033
1034static struct gdt_ccb *
1035gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
1036{
1037 struct gdt_ccb *gccb;
1038 u_int32_t cnt;
1039
1040 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1041
1042 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1043
1044 gccb = gdt_get_ccb(gdt);
1045 if (gccb == NULL) {
1046 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1047 gdt->sc_hanum));
1048 return (gccb);
1049 }
1050 gccb->gc_ucmd = ucmd;
1051 gccb->gc_service = ucmd->service;
1052 gccb->gc_flags = GDT_GCF_IOCTL;
1053
1054 /* check DPMEM space, copy data buffer from user space */
1055 if (ucmd->service == GDT_CACHESERVICE) {
1056 if (ucmd->OpCode == GDT_IOCTL) {
1057 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1058 sizeof(u_int32_t));
1059 cnt = ucmd->u.ioctl.param_size;
1060 if (cnt > GDT_SCRATCH_SZ) {
1061 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1062 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1063 gdt_free_ccb(gdt, gccb);
1064 return (NULL);
1065 }
1066 } else {
1067 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1068 GDT_SG_SZ, sizeof(u_int32_t));
1069 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1070 if (cnt > GDT_SCRATCH_SZ) {
1071 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1072 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1073 gdt_free_ccb(gdt, gccb);
1074 return (NULL);
1075 }
1076 }
1077 } else {
1078 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1079 GDT_SG_SZ, sizeof(u_int32_t));
1080 cnt = ucmd->u.raw.sdlen;
1081 if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1082 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1083 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1084 gdt_free_ccb(gdt, gccb);
1085 return (NULL);
1086 }
1087 }
1088 if (cnt != 0)
1089 bcopy(ucmd->data, gccb->gc_scratch, cnt);
1090
1091 if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1092 gdt->sc_ic_all_size) {
1093 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1094 gdt->sc_hanum));
1095 gdt_free_ccb(gdt, gccb);
1096 return (NULL);
1097 }
1098
1099 if (gdt->sc_cmd_cnt == 0)
1100 gdt->sc_set_sema0(gdt);
1101 splx(*lock);
1102
1103 /* fill cmd structure */
1104 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1105 gccb->gc_cmd_index);
1106 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1107 ucmd->OpCode);
1108
1109 if (ucmd->service == GDT_CACHESERVICE) {
1110 if (ucmd->OpCode == GDT_IOCTL) {
1111 /* IOCTL */
1112 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1113 ucmd->u.ioctl.param_size);
1114 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1115 ucmd->u.ioctl.subfunc);
1116 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1117 ucmd->u.ioctl.channel);
1118 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1119 gdt_ccb_vtop(gdt, gccb) +
1120 offsetof(struct gdt_ccb, gc_scratch[0]));
1121 } else {
1122 /* cache service command */
1123 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1124 ucmd->u.cache.DeviceNo);
1125 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1126 ucmd->u.cache.BlockNo);
1127 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1128 ucmd->u.cache.BlockCnt);
1129 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1130 0xffffffffUL);
1131 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1132 1);
1133 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1134 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1135 offsetof(struct gdt_ccb, gc_scratch[0]));
1136 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1137 GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1138 }
1139 } else {
1140 /* raw service command */
1141 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1142 ucmd->u.raw.direction);
1143 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1144 0xffffffffUL);
1145 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1146 ucmd->u.raw.sdlen);
1147 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1148 ucmd->u.raw.clen);
1149 bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1150 12);
1151 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1152 ucmd->u.raw.target;
1153 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1154 ucmd->u.raw.lun;
1155 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1156 ucmd->u.raw.bus;
1157 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1158 ucmd->u.raw.sense_len);
1159 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1160 gdt_ccb_vtop(gdt, gccb) +
1161 offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1162 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1163 1);
1164 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1165 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1166 offsetof(struct gdt_ccb, gc_scratch[0]));
1167 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1168 GDT_SG_LEN, ucmd->u.raw.sdlen);
1169 }
1170
1171 *lock = splcam();
1172 gdt_stat.sg_count_act = 1;
1173 gdt->sc_copy_cmd(gdt, gccb);
1174 return (gccb);
1175}
1176
1177static void
1178gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1179{
1180 int t;
1181
1182 t = ccb->ccb_h.target_id;
1183 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1184 gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1185
1186 switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1187 case TEST_UNIT_READY:
1188 case START_STOP:
1189 break;
1190 case REQUEST_SENSE:
1191 GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1192 break;
1193 case INQUIRY:
1194 {
1195 struct scsi_inquiry_data *inq;
1196
1197 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1198 bzero(inq, sizeof(struct scsi_inquiry_data));
1199 inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1200 T_CDROM : T_DIRECT;
1201 inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1202 inq->version = SCSI_REV_2;
1203 inq->response_format = 2;
1204 inq->additional_length = 32;
1205 inq->flags = SID_CmdQue | SID_Sync;
1206 strcpy(inq->vendor, gdt->oem_name);
1207 sprintf(inq->product, "Host Drive #%02d", t);
1208 strcpy(inq->revision, " ");
1209 break;
1210 }
1211 case MODE_SENSE_6:
1212 {
1213 struct mpd_data {
1214 struct scsi_mode_hdr_6 hd;
1215 struct scsi_mode_block_descr bd;
1216 struct scsi_control_page cp;
1217 } *mpd;
1218 u_int8_t page;
1219
1220 mpd = (struct mpd_data *)ccb->csio.data_ptr;
1221 bzero(mpd, sizeof(struct mpd_data));
1222 mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1223 sizeof(struct scsi_mode_block_descr);
1224 mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1225 mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1226 mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1227 mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1228 mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1229 page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1230 switch (page) {
1231 default:
1232 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1233 break;
1234 }
1235 break;
1236 }
1237 case READ_CAPACITY:
1238 {
1239 struct scsi_read_capacity_data *rcd;
1240
1241 rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1242 bzero(rcd, sizeof(struct scsi_read_capacity_data));
1243 scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1244 scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1245 break;
1246 }
1247 default:
1248 GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1249 ccb->csio.cdb_io.cdb_bytes[0]));
1250 break;
1251 }
1252 ccb->ccb_h.status = CAM_REQ_CMP;
1253 --gdt_stat.io_count_act;
1254 xpt_done(ccb);
1255}
1256
1257static void
1258gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1259{
1260 bus_addr_t *busaddrp;
1261
1262 busaddrp = (bus_addr_t *)arg;
1263 *busaddrp = dm_segs->ds_addr;
1264}
1265
1266static void
1267gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1268{
1269 struct gdt_ccb *gccb;
1270 union ccb *ccb;
1271 struct gdt_softc *gdt;
1272 int i, lock;
1273
1274 lock = splcam();
1275
1276 gccb = (struct gdt_ccb *)arg;
1277 ccb = gccb->gc_ccb;
1278 gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1279
1280 GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1281 gdt, gccb, dm_segs, nseg, error));
1282 gdt_stat.sg_count_act = nseg;
1283 if (nseg > gdt_stat.sg_count_max)
1284 gdt_stat.sg_count_max = nseg;
1285
1286 /* Copy the segments into our SG list */
1287 if (gccb->gc_service == GDT_CACHESERVICE) {
1288 for (i = 0; i < nseg; ++i) {
1289 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1290 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1291 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1292 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1293 dm_segs++;
1294 }
1295 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1296 nseg);
1297 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1298 0xffffffffUL);
1299
1300 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1301 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1302 } else {
1303 for (i = 0; i < nseg; ++i) {
1304 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1305 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1306 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1307 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1308 dm_segs++;
1309 }
1310 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1311 nseg);
1312 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1313 0xffffffffUL);
1314
1315 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1316 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1317 }
1318
1319 if (nseg != 0) {
1320 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1321 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1322 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1323 }
1324
1325 /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
 1326     * because the command semaphore is already set!
1327 */
1328
1329 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1330 /* timeout handling */
1331 ccb->ccb_h.timeout_ch =
1332 timeout(iir_timeout, (caddr_t)gccb,
1333 (ccb->ccb_h.timeout * hz) / 1000);
1334
1335 gdt->sc_copy_cmd(gdt, gccb);
1336 splx(lock);
1337}
1338
1339
1340static void
1341iir_action( struct cam_sim *sim, union ccb *ccb )
1342{
1343 struct gdt_softc *gdt;
1344 int lock, bus, target, lun;
1345
1346 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1347 ccb->ccb_h.ccb_sim_ptr = sim;
1348 bus = cam_sim_bus(sim);
1349 target = ccb->ccb_h.target_id;
1350 lun = ccb->ccb_h.target_lun;
1351 GDT_DPRINTF(GDT_D_CMD,
1352 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1353 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1354 bus, target, lun));
1355 ++gdt_stat.io_count_act;
1356 if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1357 gdt_stat.io_count_max = gdt_stat.io_count_act;
1358
1359 switch (ccb->ccb_h.func_code) {
1360 case XPT_SCSI_IO:
1361 lock = splcam();
1362 TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1363 ++gdt_stat.req_queue_act;
1364 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1365 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1366 splx(lock);
1367 gdt_next(gdt);
1368 break;
1369 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1370 case XPT_ABORT: /* Abort the specified CCB */
1371 /* XXX Implement */
1372 ccb->ccb_h.status = CAM_REQ_INVALID;
1373 --gdt_stat.io_count_act;
1374 xpt_done(ccb);
1375 break;
1376 case XPT_SET_TRAN_SETTINGS:
1377 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1378 --gdt_stat.io_count_act;
1379 xpt_done(ccb);
1380 break;
1381 case XPT_GET_TRAN_SETTINGS:
1382 /* Get default/user set transfer settings for the target */
1383 {
1384 struct ccb_trans_settings *cts;
1385 u_int target_mask;
1386
1387 cts = &ccb->cts;
1388 target_mask = 0x01 << target;
1389 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1390 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
1391 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1392 cts->sync_period = 25; /* 10MHz */
1393 if (cts->sync_period != 0)
1394 cts->sync_offset = 15;
1395
1396 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1397 | CCB_TRANS_SYNC_OFFSET_VALID
1398 | CCB_TRANS_BUS_WIDTH_VALID
1399 | CCB_TRANS_DISC_VALID
1400 | CCB_TRANS_TQ_VALID;
1401 ccb->ccb_h.status = CAM_REQ_CMP;
1402 } else {
1403 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1404 }
1405 --gdt_stat.io_count_act;
1406 xpt_done(ccb);
1407 break;
1408 }
1409 case XPT_CALC_GEOMETRY:
1410 {
1411 struct ccb_calc_geometry *ccg;
1412 u_int32_t secs_per_cylinder;
1413
1414 ccg = &ccb->ccg;
1415 ccg->heads = gdt->sc_hdr[target].hd_heads;
1416 ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1417 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1418 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1419 ccb->ccb_h.status = CAM_REQ_CMP;
1420 --gdt_stat.io_count_act;
1421 xpt_done(ccb);
1422 break;
1423 }
1424 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1425 {
1426 /* XXX Implement */
1427 ccb->ccb_h.status = CAM_REQ_CMP;
1428 --gdt_stat.io_count_act;
1429 xpt_done(ccb);
1430 break;
1431 }
1432 case XPT_TERM_IO: /* Terminate the I/O process */
1433 /* XXX Implement */
1434 ccb->ccb_h.status = CAM_REQ_INVALID;
1435 --gdt_stat.io_count_act;
1436 xpt_done(ccb);
1437 break;
1438 case XPT_PATH_INQ: /* Path routing inquiry */
1439 {
1440 struct ccb_pathinq *cpi = &ccb->cpi;
1441
1442 cpi->version_num = 1;
1443 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1444 cpi->hba_inquiry |= PI_WIDE_16;
1445 cpi->target_sprt = 1;
1446 cpi->hba_misc = 0;
1447 cpi->hba_eng_cnt = 0;
1448 if (bus == gdt->sc_virt_bus)
1449 cpi->max_target = GDT_MAX_HDRIVES - 1;
1450 else if (gdt->sc_class & GDT_FC)
1451 cpi->max_target = GDT_MAXID_FC - 1;
1452 else
1453 cpi->max_target = GDT_MAXID - 1;
1454 cpi->max_lun = 7;
1455 cpi->unit_number = cam_sim_unit(sim);
1456 cpi->bus_id = bus;
1457 cpi->initiator_id =
1458 (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1459 cpi->base_transfer_speed = 3300;
1460 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
47
48#define _IIR_C_
49
50/* #include "opt_iir.h" */
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/endian.h>
54#include <sys/eventhandler.h>
55#include <sys/malloc.h>
56#include <sys/kernel.h>
57#include <sys/bus.h>
58
59#include <machine/bus_memio.h>
60#include <machine/bus_pio.h>
61#include <machine/bus.h>
62#include <machine/clock.h>
63#include <machine/stdarg.h>
64
65#include <cam/cam.h>
66#include <cam/cam_ccb.h>
67#include <cam/cam_sim.h>
68#include <cam/cam_xpt_sim.h>
69#include <cam/cam_debug.h>
70#include <cam/scsi/scsi_all.h>
71#include <cam/scsi/scsi_message.h>
72
73#include <vm/vm.h>
74#include <vm/pmap.h>
75
76#include <dev/iir/iir.h>
77
78struct gdt_softc *gdt_wait_gdt;
79int gdt_wait_index;
80
81#ifdef GDT_DEBUG
82int gdt_debug = GDT_DEBUG;
83#ifdef __SERIAL__
84#define MAX_SERBUF 160
85static void ser_init(void);
86static void ser_puts(char *str);
87static void ser_putc(int c);
88static char strbuf[MAX_SERBUF+1];
89#ifdef __COM2__
90#define COM_BASE 0x2f8
91#else
92#define COM_BASE 0x3f8
93#endif
94static void ser_init()
95{
96 unsigned port=COM_BASE;
97
98 outb(port+3, 0x80);
99 outb(port+1, 0);
100 /* 19200 Baud, if 9600: outb(12,port) */
101 outb(port, 6);
102 outb(port+3, 3);
103 outb(port+1, 0);
104}
105
106static void ser_puts(char *str)
107{
108 char *ptr;
109
110 ser_init();
111 for (ptr=str;*ptr;++ptr)
112 ser_putc((int)(*ptr));
113}
114
115static void ser_putc(int c)
116{
117 unsigned port=COM_BASE;
118
119 while ((inb(port+5) & 0x20)==0);
120 outb(port, c);
121 if (c==0x0a)
122 {
123 while ((inb(port+5) & 0x20)==0);
124 outb(port, 0x0d);
125 }
126}
127
128int ser_printf(const char *fmt, ...)
129{
130 va_list args;
131 int i;
132
133 va_start(args,fmt);
134 i = vsprintf(strbuf,fmt,args);
135 ser_puts(strbuf);
136 va_end(args);
137 return i;
138}
139#endif
140#endif
141
142/* The linked list of softc structures */
143struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
144/* controller cnt. */
145int gdt_cnt = 0;
146/* event buffer */
147static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
148static int elastidx, eoldidx;
149/* statistics */
150gdt_statist_t gdt_stat;
151
152/* Definitions for our use of the SIM private CCB area */
153#define ccb_sim_ptr spriv_ptr0
154#define ccb_priority spriv_field1
155
156static void iir_action(struct cam_sim *sim, union ccb *ccb);
157static void iir_poll(struct cam_sim *sim);
158static void iir_shutdown(void *arg, int howto);
159static void iir_timeout(void *arg);
160static void iir_watchdog(void *arg);
161
162static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
163 int *secs);
164static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
165 u_int8_t service, u_int16_t opcode,
166 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
167static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
168 int timeout);
169
170static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
171static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
172 struct gdt_ccb *gccb);
173
174static int gdt_sync_event(struct gdt_softc *gdt, int service,
175 u_int8_t index, struct gdt_ccb *gccb);
176static int gdt_async_event(struct gdt_softc *gdt, int service);
177static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
178 union ccb *ccb, int *lock);
179static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
180 union ccb *ccb, int *lock);
181static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
182 gdt_ucmd_t *ucmd, int *lock);
183static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
184
185static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
186 int nseg, int error);
187static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
188 int nseg, int error);
189
190int
191iir_init(struct gdt_softc *gdt)
192{
193 u_int16_t cdev_cnt;
194 int i, id, drv_cyls, drv_hds, drv_secs;
195 struct gdt_ccb *gccb;
196
197 GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
198
199 gdt->sc_state = GDT_POLLING;
200 gdt_clear_events();
201 bzero(&gdt_stat, sizeof(gdt_statist_t));
202
203 SLIST_INIT(&gdt->sc_free_gccb);
204 SLIST_INIT(&gdt->sc_pending_gccb);
205 TAILQ_INIT(&gdt->sc_ccb_queue);
206 TAILQ_INIT(&gdt->sc_ucmd_queue);
207 TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
208
209 /* DMA tag for mapping buffers into device visible space. */
210 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
211 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
212 /*highaddr*/BUS_SPACE_MAXADDR,
213 /*filter*/NULL, /*filterarg*/NULL,
214 /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
215 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
216 /*flags*/BUS_DMA_ALLOCNOW,
217 /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
218 &gdt->sc_buffer_dmat) != 0) {
219 printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
220 gdt->sc_hanum);
221 return (1);
222 }
223 gdt->sc_init_level++;
224
225 /* DMA tag for our ccb structures */
226 if (bus_dma_tag_create(gdt->sc_parent_dmat,
227 /*alignment*/1,
228 /*boundary*/0,
229 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
230 /*highaddr*/BUS_SPACE_MAXADDR,
231 /*filter*/NULL,
232 /*filterarg*/NULL,
233 GDT_MAXCMDS * sizeof(struct gdt_ccb), /* maxsize */
234 /*nsegments*/1,
235 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
236 /*flags*/0, /*lockfunc*/busdma_lock_mutex,
237 /*lockarg*/&Giant, &gdt->sc_gccb_dmat) != 0) {
238 printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
239 gdt->sc_hanum);
240 return (1);
241 }
242 gdt->sc_init_level++;
243
244 /* Allocation for our ccbs */
245 if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
246 BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
247 printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
248 gdt->sc_hanum);
249 return (1);
250 }
251 gdt->sc_init_level++;
252
253 /* And permanently map them */
254 bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
255 gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
256 gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
257 gdt->sc_init_level++;
258
259 /* Clear them out. */
260 bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
261
262 /* Initialize the ccbs */
263 for (i = GDT_MAXCMDS-1; i >= 0; i--) {
264 gdt->sc_gccbs[i].gc_cmd_index = i + 2;
265 gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
266 gdt->sc_gccbs[i].gc_map_flag = FALSE;
267 if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
268 &gdt->sc_gccbs[i].gc_dmamap) != 0)
269 return(1);
270 gdt->sc_gccbs[i].gc_map_flag = TRUE;
271 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
272 }
273 gdt->sc_init_level++;
274
275 /* create the control device */
276 gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
277
278 /* allocate ccb for gdt_internal_cmd() */
279 gccb = gdt_get_ccb(gdt);
280 if (gccb == NULL) {
281 printf("iir%d: No free command index found\n",
282 gdt->sc_hanum);
283 return (1);
284 }
285
286 if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
287 0, 0, 0)) {
288 printf("iir%d: Screen service initialization error %d\n",
289 gdt->sc_hanum, gdt->sc_status);
290 gdt_free_ccb(gdt, gccb);
291 return (1);
292 }
293
294 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
295 GDT_LINUX_OS, 0, 0)) {
296 printf("iir%d: Cache service initialization error %d\n",
297 gdt->sc_hanum, gdt->sc_status);
298 gdt_free_ccb(gdt, gccb);
299 return (1);
300 }
301 gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
302 0, 0, 0);
303
304 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
305 0xffff, 1, 0)) {
306 printf("iir%d: Cache service mount error %d\n",
307 gdt->sc_hanum, gdt->sc_status);
308 gdt_free_ccb(gdt, gccb);
309 return (1);
310 }
311
312 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
313 GDT_LINUX_OS, 0, 0)) {
314 printf("iir%d: Cache service post-mount initialization error %d\n",
315 gdt->sc_hanum, gdt->sc_status);
316 gdt_free_ccb(gdt, gccb);
317 return (1);
318 }
319 cdev_cnt = (u_int16_t)gdt->sc_info;
320 gdt->sc_fw_vers = gdt->sc_service;
321
322 /* Detect number of buses */
323 gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
324 gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
325 gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
326 gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
327 gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
328 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
329 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
330 GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
331 gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
332 for (i = 0; i < gdt->sc_bus_cnt; i++) {
333 id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
334 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
335 gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
336 }
337 } else {
338 /* New method failed, use fallback. */
339 for (i = 0; i < GDT_MAXBUS; i++) {
340 gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
341 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
342 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
343 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
344 GDT_GETCH_SZ)) {
345 if (i == 0) {
346 printf("iir%d: Cannot get channel count, "
347 "error %d\n", gdt->sc_hanum, gdt->sc_status);
348 gdt_free_ccb(gdt, gccb);
349 return (1);
350 }
351 break;
352 }
353 gdt->sc_bus_id[i] =
354 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
355 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
356 }
357 gdt->sc_bus_cnt = i;
358 }
359 /* add one "virtual" channel for the host drives */
360 gdt->sc_virt_bus = gdt->sc_bus_cnt;
361 gdt->sc_bus_cnt++;
362
363 if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
364 0, 0, 0)) {
365 printf("iir%d: Raw service initialization error %d\n",
366 gdt->sc_hanum, gdt->sc_status);
367 gdt_free_ccb(gdt, gccb);
368 return (1);
369 }
370
371 /* Set/get features raw service (scatter/gather) */
372 gdt->sc_raw_feat = 0;
373 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
374 GDT_SCATTER_GATHER, 0, 0)) {
375 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
376 0, 0, 0)) {
377 gdt->sc_raw_feat = gdt->sc_info;
378 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
379 panic("iir%d: Scatter/Gather Raw Service "
380 "required but not supported!\n", gdt->sc_hanum);
381 gdt_free_ccb(gdt, gccb);
382 return (1);
383 }
384 }
385 }
386
387 /* Set/get features cache service (scatter/gather) */
388 gdt->sc_cache_feat = 0;
389 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
390 0, GDT_SCATTER_GATHER, 0)) {
391 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
392 0, 0, 0)) {
393 gdt->sc_cache_feat = gdt->sc_info;
394 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
395 panic("iir%d: Scatter/Gather Cache Service "
396 "required but not supported!\n", gdt->sc_hanum);
397 gdt_free_ccb(gdt, gccb);
398 return (1);
399 }
400 }
401 }
402
403 /* OEM: read the OEM string record to determine the vendor name */
404 gdt_enc32(gccb->gc_scratch + GDT_OEM_VERSION, 0x01);
405 gdt_enc32(gccb->gc_scratch + GDT_OEM_BUFSIZE, sizeof(gdt_oem_record_t));
406 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
407 GDT_OEM_STR_RECORD, GDT_INVALID_CHANNEL,
408 sizeof(gdt_oem_str_record_t))) {
409 strncpy(gdt->oem_name, ((gdt_oem_str_record_t *)
410 gccb->gc_scratch)->text.scsi_host_drive_inquiry_vendor_id, 7);
411 gdt->oem_name[7]='\0';
412 } else {
413 /* Old method, based on PCI ID */
414 if (gdt->sc_vendor == INTEL_VENDOR_ID)
415 strcpy(gdt->oem_name,"Intel ");
416 else
417 strcpy(gdt->oem_name,"ICP ");
418 }
419
420 /* Scan for cache devices */
421 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
422 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
423 i, 0, 0)) {
424 gdt->sc_hdr[i].hd_present = 1;
425 gdt->sc_hdr[i].hd_size = gdt->sc_info;
426
427 /*
428 * Evaluate the drive mapping (heads, sectors per track, cylinders)
429 */
430 gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
431 if (gdt->sc_info2 == 0)
432 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
433 &drv_cyls, &drv_hds, &drv_secs);
434 else {
435 drv_hds = gdt->sc_info2 & 0xff;
436 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
437 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
438 drv_secs;
439 }
440 gdt->sc_hdr[i].hd_heads = drv_hds;
441 gdt->sc_hdr[i].hd_secs = drv_secs;
442 /* Round the size */
443 gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
444
445 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
446 GDT_DEVTYPE, i, 0, 0))
447 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
448 }
449 }
450
451 GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
452 gdt->sc_dpmembase,
453 gdt->sc_bus_cnt, cdev_cnt,
454 cdev_cnt == 1 ? "" : "s"));
455 gdt_free_ccb(gdt, gccb);
456
457 gdt_cnt++;
458 return (0);
459}
460
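/*
 * Undo the resource allocations made during controller setup, in reverse
 * order according to sc_init_level: control device, per-ccb DMA maps,
 * the gccb DMA memory and the DMA tags; finally remove the softc from
 * the global controller list.
 */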
461void
462iir_free(struct gdt_softc *gdt)
463{
464 int i;
465
466 GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
467
468 switch (gdt->sc_init_level) {
469 default:
470 gdt_destroy_dev(gdt->sc_dev);
471 case 5:
472 for (i = GDT_MAXCMDS-1; i >= 0; i--)
473 if (gdt->sc_gccbs[i].gc_map_flag)
474 bus_dmamap_destroy(gdt->sc_buffer_dmat,
475 gdt->sc_gccbs[i].gc_dmamap);
476 bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
477 case 4:
478 bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
479 case 3:
480 bus_dma_tag_destroy(gdt->sc_gccb_dmat);
481 case 2:
482 bus_dma_tag_destroy(gdt->sc_buffer_dmat);
483 case 1:
484 bus_dma_tag_destroy(gdt->sc_parent_dmat);
485 case 0:
486 break;
487 }
488 TAILQ_REMOVE(&gdt_softcs, gdt, links);
489}
490
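/*
 * Register the controller with CAM: allocate a shared device queue and
 * a SIM/path pair for each bus (including the virtual host-drive bus),
 * and hook iir_shutdown() into the shutdown_final event handler once at
 * least one bus has been registered.
 */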
491void
492iir_attach(struct gdt_softc *gdt)
493{
494 struct cam_devq *devq;
495 int i;
496
497 GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
498
499 /*
500 * Create the device queue for our SIM.
501 */
502 devq = cam_simq_alloc(GDT_MAXCMDS);
503 if (devq == NULL)
504 return;
505
506 for (i = 0; i < gdt->sc_bus_cnt; i++) {
507 /*
508 * Construct our SIM entry
509 */
510 gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
511 gdt, gdt->sc_hanum, /*untagged*/2,
512 /*tagged*/GDT_MAXCMDS, devq);
513 if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
514 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
515 break;
516 }
517
518 if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
519 cam_sim_path(gdt->sims[i]),
520 CAM_TARGET_WILDCARD,
521 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
522 xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
523 cam_sim_free(gdt->sims[i], /*free_devq*/i == 0);
524 break;
525 }
526 }
527 if (i > 0)
528 EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
529 gdt, SHUTDOWN_PRI_DEFAULT);
530 /* iir_watchdog(gdt); */
531 gdt->sc_state = GDT_NORMAL;
532}
533
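/*
 * Derive a CHS geometry from a host drive's capacity in sectors: try
 * 64 heads x 32 sectors first, then 127 x 63, and fall back to the
 * large translation (GDT_BIGHEADS x GDT_BIGSECS) if the cylinder count
 * would still exceed GDT_MAXCYLS.
 */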
534static void
535gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
536{
537 *cyls = size / GDT_HEADS / GDT_SECS;
538 if (*cyls < GDT_MAXCYLS) {
539 *heads = GDT_HEADS;
540 *secs = GDT_SECS;
541 } else {
542 /* Too high for 64 * 32 */
543 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
544 if (*cyls < GDT_MAXCYLS) {
545 *heads = GDT_MEDHEADS;
546 *secs = GDT_MEDSECS;
547 } else {
548 /* Too high for 127 * 63 */
549 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
550 *heads = GDT_BIGHEADS;
551 *secs = GDT_BIGSECS;
552 }
553 }
554}
555
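/*
 * Busy-wait for completion of the command described by gccb: call
 * iir_intr() in a loop until it reports this controller and command
 * index via gdt_wait_gdt/gdt_wait_index, or until roughly `timeout'
 * iterations of DELAY(1) have elapsed.  Returns 1 on completion,
 * 0 on timeout.
 */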
556static int
557gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
558 int timeout)
559{
560 int rv = 0;
561
562 GDT_DPRINTF(GDT_D_INIT,
563 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
564
565 gdt->sc_state |= GDT_POLL_WAIT;
566 do {
567 iir_intr(gdt);
568 if (gdt == gdt_wait_gdt &&
569 gccb->gc_cmd_index == gdt_wait_index) {
570 rv = 1;
571 break;
572 }
573 DELAY(1);
574 } while (--timeout);
575 gdt->sc_state &= ~GDT_POLL_WAIT;
576
577 while (gdt->sc_test_busy(gdt))
578 DELAY(1); /* XXX correct? */
579
580 return (rv);
581}
582
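/*
 * Issue a polled internal command (initialization, mount, ioctl,
 * feature negotiation, ...) to the screen, cache or raw SCSI service.
 * The command is built in sc_cmd, copied to the controller and waited
 * for with gdt_wait(); a busy status is retried up to GDT_RETRIES
 * times.  Returns non-zero only if the controller reported GDT_S_OK.
 */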
583static int
584gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
585 u_int8_t service, u_int16_t opcode,
586 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
587{
588 int retries;
589
590 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
591 gdt, service, opcode, arg1, arg2, arg3));
592
593 bzero(gdt->sc_cmd, GDT_CMD_SZ);
594
595 for (retries = GDT_RETRIES; ; ) {
596 gccb->gc_service = service;
597 gccb->gc_flags = GDT_GCF_INTERNAL;
598
599 gdt->sc_set_sema0(gdt);
600 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
601 gccb->gc_cmd_index);
602 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
603
604 switch (service) {
605 case GDT_CACHESERVICE:
606 if (opcode == GDT_IOCTL) {
607 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
608 GDT_IOCTL_SUBFUNC, arg1);
609 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
610 GDT_IOCTL_CHANNEL, arg2);
611 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
612 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
613 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
614 gdt_ccb_vtop(gdt, gccb) +
615 offsetof(struct gdt_ccb, gc_scratch[0]));
616 } else {
617 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
618 GDT_CACHE_DEVICENO, (u_int16_t)arg1);
619 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
620 GDT_CACHE_BLOCKNO, arg2);
621 }
622 break;
623
624 case GDT_SCSIRAWSERVICE:
625 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
626 GDT_RAW_DIRECTION, arg1);
627 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
628 (u_int8_t)arg2;
629 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
630 (u_int8_t)arg3;
631 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
632 (u_int8_t)(arg3 >> 8);
633 }
634
635 gdt->sc_cmd_len = GDT_CMD_SZ;
636 gdt->sc_cmd_off = 0;
637 gdt->sc_cmd_cnt = 0;
638 gdt->sc_copy_cmd(gdt, gccb);
639 gdt->sc_release_event(gdt);
640 DELAY(20);
641 if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
642 return (0);
643 if (gdt->sc_status != GDT_S_BSY || --retries == 0)
644 break;
645 DELAY(1);
646 }
647 return (gdt->sc_status == GDT_S_OK);
648}
649
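/*
 * Fetch a command control block from the free list and move it to the
 * pending list (protected by splcam()).  Returns NULL when all command
 * indices are in use.
 */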
650static struct gdt_ccb *
651gdt_get_ccb(struct gdt_softc *gdt)
652{
653 struct gdt_ccb *gccb;
654 int lock;
655
656 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
657
658 lock = splcam();
659 gccb = SLIST_FIRST(&gdt->sc_free_gccb);
660 if (gccb != NULL) {
661 SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
662 SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
663 ++gdt_stat.cmd_index_act;
664 if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
665 gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
666 }
667 splx(lock);
668 return (gccb);
669}
670
671void
672gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
673{
674 int lock;
675
676 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
677
678 lock = splcam();
679 gccb->gc_flags = GDT_GCF_UNUSED;
680 SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
681 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
682 --gdt_stat.cmd_index_act;
683 splx(lock);
684 if (gdt->sc_state & GDT_SHUTDOWN)
685 wakeup(gccb);
686}
687
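/*
 * Convert a gccb kernel virtual address into the bus address seen by
 * the controller, based on the bus base address recorded when the gccb
 * array was loaded with bus_dmamap_load().
 */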
688static u_int32_t
689gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
690{
691 return (gdt->sc_gccb_busbase
692 + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
693}
694
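/*
 * Central command dispatcher: drain the user command (IOCTL) queue and
 * the CAM ccb queue and start as many commands as fit into the DPMEM
 * command area.  Requests on the virtual bus are handled by the cache
 * service or answered locally via gdt_internal_cache_cmd(); requests on
 * the physical buses go to the raw SCSI service.  In polling mode only
 * a single command is started and then completed with gdt_wait().
 */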
695void
696gdt_next(struct gdt_softc *gdt)
697{
698 int lock;
699 union ccb *ccb;
700 gdt_ucmd_t *ucmd;
701 struct cam_sim *sim;
702 int bus, target, lun;
703 int next_cmd;
704
705 struct ccb_scsiio *csio;
706 struct ccb_hdr *ccbh;
707 struct gdt_ccb *gccb = NULL;
708 u_int8_t cmd;
709
710 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
711
712 lock = splcam();
713 if (gdt->sc_test_busy(gdt)) {
714 if (!(gdt->sc_state & GDT_POLLING)) {
715 splx(lock);
716 return;
717 }
718 while (gdt->sc_test_busy(gdt))
719 DELAY(1);
720 }
721
722 gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
723 next_cmd = TRUE;
724 for (;;) {
725 /* I/Os in queue? controller ready? */
726 if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
727 !TAILQ_FIRST(&gdt->sc_ccb_queue))
728 break;
729
730 /* 1.: I/Os without ccb (IOCTLs) */
731 ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
732 if (ucmd != NULL) {
733 TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
734 if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
735 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
736 break;
737 }
738 break;
739 /* if multiple commands were allowed here: if (!gdt_polling) continue; */
740 }
741
742 /* 2.: I/Os with ccb */
743 ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
744 /* always != NULL here, since it was tested above */
745 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
746 bus = cam_sim_bus(sim);
747 target = ccb->ccb_h.target_id;
748 lun = ccb->ccb_h.target_lun;
749
750 TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
751 --gdt_stat.req_queue_act;
752 /* ccb->ccb_h.func_code is XPT_SCSI_IO */
753 GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
754 ccb->ccb_h.flags));
755 csio = &ccb->csio;
756 ccbh = &ccb->ccb_h;
757 cmd = csio->cdb_io.cdb_bytes[0];
758 /* Max CDB length is 12 bytes */
759 if (csio->cdb_len > 12) {
760 ccbh->status = CAM_REQ_INVALID;
761 --gdt_stat.io_count_act;
762 xpt_done(ccb);
763 } else if (bus != gdt->sc_virt_bus) {
764 /* raw service command */
765 if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
766 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
767 sim_links.tqe);
768 ++gdt_stat.req_queue_act;
769 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
770 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
771 next_cmd = FALSE;
772 }
773 } else if (target >= GDT_MAX_HDRIVES ||
774 !gdt->sc_hdr[target].hd_present || lun != 0) {
775 ccbh->status = CAM_DEV_NOT_THERE;
776 --gdt_stat.io_count_act;
777 xpt_done(ccb);
778 } else {
779 /* cache service command */
780 if (cmd == READ_6 || cmd == WRITE_6 ||
781 cmd == READ_10 || cmd == WRITE_10) {
782 if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
783 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
784 sim_links.tqe);
785 ++gdt_stat.req_queue_act;
786 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
787 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
788 next_cmd = FALSE;
789 }
790 } else {
791 splx(lock);
792 gdt_internal_cache_cmd(gdt, ccb);
793 lock = splcam();
794 }
795 }
796 if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
797 break;
798 }
799 if (gdt->sc_cmd_cnt > 0)
800 gdt->sc_release_event(gdt);
801
802 splx(lock);
803
804 if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
805 gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
806 }
807}
808
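/*
 * Build a raw (pass-through) SCSI command for a device on a physical
 * bus.  Returns NULL if the DPMEM command area is full or no free gccb
 * is available, in which case the caller requeues the ccb.  The data
 * buffer is mapped with bus_dmamap_load(); the scatter/gather list is
 * filled in later by the gdtexecuteccb() callback.
 */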
809static struct gdt_ccb *
810gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
811{
812 struct gdt_ccb *gccb;
813 struct cam_sim *sim;
814
815 GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
816
817 if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
818 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
819 gdt->sc_ic_all_size) {
820 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
821 gdt->sc_hanum));
822 return (NULL);
823 }
824
825 bzero(gdt->sc_cmd, GDT_CMD_SZ);
826
827 gccb = gdt_get_ccb(gdt);
828 if (gccb == NULL) {
829 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
830 gdt->sc_hanum));
831 return (gccb);
832 }
833 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
834 gccb->gc_ccb = ccb;
835 gccb->gc_service = GDT_SCSIRAWSERVICE;
836 gccb->gc_flags = GDT_GCF_SCSI;
837
838 if (gdt->sc_cmd_cnt == 0)
839 gdt->sc_set_sema0(gdt);
840 splx(*lock);
841 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
842 gccb->gc_cmd_index);
843 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
844
845 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
846 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
847 GDT_DATA_IN : GDT_DATA_OUT);
848 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
849 ccb->csio.dxfer_len);
850 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
851 ccb->csio.cdb_len);
852 bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
853 ccb->csio.cdb_len);
854 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
855 ccb->ccb_h.target_id;
856 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
857 ccb->ccb_h.target_lun;
858 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
859 cam_sim_bus(sim);
860 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
861 sizeof(struct scsi_sense_data));
862 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
863 gdt_ccb_vtop(gdt, gccb) +
864 offsetof(struct gdt_ccb, gc_scratch[0]));
865
866 /*
867 * If we have any data to send with this command,
868 * map it into bus space.
869 */
870 /* Only use S/G if there is a transfer */
871 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
872 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
873 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
874 int s;
875 int error;
876
877 /* unlock splcam() beforehand ??? */
878 s = splsoftvm();
879 error =
880 bus_dmamap_load(gdt->sc_buffer_dmat,
881 gccb->gc_dmamap,
882 ccb->csio.data_ptr,
883 ccb->csio.dxfer_len,
884 gdtexecuteccb,
885 gccb, /*flags*/0);
886 if (error == EINPROGRESS) {
887 xpt_freeze_simq(sim, 1);
888 gccb->gc_state |= CAM_RELEASE_SIMQ;
889 }
890 splx(s);
891 } else {
892 struct bus_dma_segment seg;
893
894 /* Pointer to physical buffer */
895 seg.ds_addr =
896 (bus_addr_t)ccb->csio.data_ptr;
897 seg.ds_len = ccb->csio.dxfer_len;
898 gdtexecuteccb(gccb, &seg, 1, 0);
899 }
900 } else {
901 struct bus_dma_segment *segs;
902
903 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
904 panic("iir%d: iir_action - Physical "
905 "segment pointers unsupported", gdt->sc_hanum);
906
907 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
908 panic("iir%d: iir_action - Virtual "
909 "segment addresses unsupported", gdt->sc_hanum);
910
911 /* Just use the segments provided */
912 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
913 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
914 }
915 } else {
916 gdtexecuteccb(gccb, NULL, 0, 0);
917 }
918
919 *lock = splcam();
920 return (gccb);
921}
922
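/*
 * Build a cache service read/write command for a host drive.  Block
 * number and count are decoded from the 6- or 10-byte CDB; during
 * shutdown, writes are turned into write-through requests.  As in
 * gdt_raw_cmd(), the scatter/gather list is completed by
 * gdtexecuteccb().
 */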
923static struct gdt_ccb *
924gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
925{
926 struct gdt_ccb *gccb;
927 struct cam_sim *sim;
928 u_int8_t *cmdp;
929 u_int16_t opcode;
930 u_int32_t blockno, blockcnt;
931
932 GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
933
934 if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
935 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
936 gdt->sc_ic_all_size) {
937 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
938 gdt->sc_hanum));
939 return (NULL);
940 }
941
942 bzero(gdt->sc_cmd, GDT_CMD_SZ);
943
944 gccb = gdt_get_ccb(gdt);
945 if (gccb == NULL) {
946 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
947 gdt->sc_hanum));
948 return (gccb);
949 }
950 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
951 gccb->gc_ccb = ccb;
952 gccb->gc_service = GDT_CACHESERVICE;
953 gccb->gc_flags = GDT_GCF_SCSI;
954
955 if (gdt->sc_cmd_cnt == 0)
956 gdt->sc_set_sema0(gdt);
957 splx(*lock);
958 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
959 gccb->gc_cmd_index);
960 cmdp = ccb->csio.cdb_io.cdb_bytes;
961 opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
962 if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
963 opcode = GDT_WRITE_THR;
964 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
965
966 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
967 ccb->ccb_h.target_id);
968 if (ccb->csio.cdb_len == 6) {
969 struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
970 blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
971 blockcnt = rw->length ? rw->length : 0x100;
972 } else {
973 struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
974 blockno = scsi_4btoul(rw->addr);
975 blockcnt = scsi_2btoul(rw->length);
976 }
977 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
978 blockno);
979 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
980 blockcnt);
981
982 /*
983 * If we have any data to send with this command,
984 * map it into bus space.
985 */
986 /* Only use S/G if there is a transfer */
987 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
988 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
989 int s;
990 int error;
991
992 /* unlock splcam() beforehand ??? */
993 s = splsoftvm();
994 error =
995 bus_dmamap_load(gdt->sc_buffer_dmat,
996 gccb->gc_dmamap,
997 ccb->csio.data_ptr,
998 ccb->csio.dxfer_len,
999 gdtexecuteccb,
1000 gccb, /*flags*/0);
1001 if (error == EINPROGRESS) {
1002 xpt_freeze_simq(sim, 1);
1003 gccb->gc_state |= CAM_RELEASE_SIMQ;
1004 }
1005 splx(s);
1006 } else {
1007 struct bus_dma_segment seg;
1008
1009 /* Pointer to physical buffer */
1010 seg.ds_addr =
1011 (bus_addr_t)ccb->csio.data_ptr;
1012 seg.ds_len = ccb->csio.dxfer_len;
1013 gdtexecuteccb(gccb, &seg, 1, 0);
1014 }
1015 } else {
1016 struct bus_dma_segment *segs;
1017
1018 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
1019 panic("iir%d: iir_action - Physical "
1020 "segment pointers unsupported", gdt->sc_hanum);
1021
1022 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
1023 panic("iir%d: iir_action - Virtual "
1024 "segment addresses unsupported", gdt->sc_hanum);
1025
1026 /* Just use the segments provided */
1027 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1028 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
1029 }
1030
1031 *lock = splcam();
1032 return (gccb);
1033}
1034
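/*
 * Build a command on behalf of a user-level request from the control
 * device.  The user data is copied into the per-command scratch area,
 * which also serves as the single scatter/gather element (or IOCTL
 * parameter buffer), so the request must fit into GDT_SCRATCH_SZ.
 */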
1035static struct gdt_ccb *
1036gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
1037{
1038 struct gdt_ccb *gccb;
1039 u_int32_t cnt;
1040
1041 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1042
1043 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1044
1045 gccb = gdt_get_ccb(gdt);
1046 if (gccb == NULL) {
1047 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1048 gdt->sc_hanum));
1049 return (gccb);
1050 }
1051 gccb->gc_ucmd = ucmd;
1052 gccb->gc_service = ucmd->service;
1053 gccb->gc_flags = GDT_GCF_IOCTL;
1054
1055 /* check DPMEM space, copy data buffer from user space */
1056 if (ucmd->service == GDT_CACHESERVICE) {
1057 if (ucmd->OpCode == GDT_IOCTL) {
1058 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1059 sizeof(u_int32_t));
1060 cnt = ucmd->u.ioctl.param_size;
1061 if (cnt > GDT_SCRATCH_SZ) {
1062 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1063 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1064 gdt_free_ccb(gdt, gccb);
1065 return (NULL);
1066 }
1067 } else {
1068 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1069 GDT_SG_SZ, sizeof(u_int32_t));
1070 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1071 if (cnt > GDT_SCRATCH_SZ) {
1072 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1073 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1074 gdt_free_ccb(gdt, gccb);
1075 return (NULL);
1076 }
1077 }
1078 } else {
1079 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1080 GDT_SG_SZ, sizeof(u_int32_t));
1081 cnt = ucmd->u.raw.sdlen;
1082 if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1083 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1084 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1085 gdt_free_ccb(gdt, gccb);
1086 return (NULL);
1087 }
1088 }
1089 if (cnt != 0)
1090 bcopy(ucmd->data, gccb->gc_scratch, cnt);
1091
1092 if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1093 gdt->sc_ic_all_size) {
1094 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1095 gdt->sc_hanum));
1096 gdt_free_ccb(gdt, gccb);
1097 return (NULL);
1098 }
1099
1100 if (gdt->sc_cmd_cnt == 0)
1101 gdt->sc_set_sema0(gdt);
1102 splx(*lock);
1103
1104 /* fill cmd structure */
1105 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1106 gccb->gc_cmd_index);
1107 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1108 ucmd->OpCode);
1109
1110 if (ucmd->service == GDT_CACHESERVICE) {
1111 if (ucmd->OpCode == GDT_IOCTL) {
1112 /* IOCTL */
1113 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1114 ucmd->u.ioctl.param_size);
1115 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1116 ucmd->u.ioctl.subfunc);
1117 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1118 ucmd->u.ioctl.channel);
1119 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1120 gdt_ccb_vtop(gdt, gccb) +
1121 offsetof(struct gdt_ccb, gc_scratch[0]));
1122 } else {
1123 /* cache service command */
1124 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1125 ucmd->u.cache.DeviceNo);
1126 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1127 ucmd->u.cache.BlockNo);
1128 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1129 ucmd->u.cache.BlockCnt);
1130 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1131 0xffffffffUL);
1132 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1133 1);
1134 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1135 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1136 offsetof(struct gdt_ccb, gc_scratch[0]));
1137 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1138 GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1139 }
1140 } else {
1141 /* raw service command */
1142 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1143 ucmd->u.raw.direction);
1144 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1145 0xffffffffUL);
1146 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1147 ucmd->u.raw.sdlen);
1148 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1149 ucmd->u.raw.clen);
1150 bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1151 12);
1152 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1153 ucmd->u.raw.target;
1154 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1155 ucmd->u.raw.lun;
1156 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1157 ucmd->u.raw.bus;
1158 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1159 ucmd->u.raw.sense_len);
1160 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1161 gdt_ccb_vtop(gdt, gccb) +
1162 offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1163 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1164 1);
1165 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1166 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1167 offsetof(struct gdt_ccb, gc_scratch[0]));
1168 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1169 GDT_SG_LEN, ucmd->u.raw.sdlen);
1170 }
1171
1172 *lock = splcam();
1173 gdt_stat.sg_count_act = 1;
1174 gdt->sc_copy_cmd(gdt, gccb);
1175 return (gccb);
1176}
1177
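/*
 * Answer simple SCSI commands (TEST UNIT READY, INQUIRY, MODE SENSE,
 * READ CAPACITY, ...) for host drives on the virtual bus without
 * involving the controller; the responses are synthesized from the
 * cached host drive data and the ccb is completed immediately.
 */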
1178static void
1179gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1180{
1181 int t;
1182
1183 t = ccb->ccb_h.target_id;
1184 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1185 gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1186
1187 switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1188 case TEST_UNIT_READY:
1189 case START_STOP:
1190 break;
1191 case REQUEST_SENSE:
1192 GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1193 break;
1194 case INQUIRY:
1195 {
1196 struct scsi_inquiry_data *inq;
1197
1198 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1199 bzero(inq, sizeof(struct scsi_inquiry_data));
1200 inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1201 T_CDROM : T_DIRECT;
1202 inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1203 inq->version = SCSI_REV_2;
1204 inq->response_format = 2;
1205 inq->additional_length = 32;
1206 inq->flags = SID_CmdQue | SID_Sync;
1207 strcpy(inq->vendor, gdt->oem_name);
1208 sprintf(inq->product, "Host Drive #%02d", t);
1209 strcpy(inq->revision, " ");
1210 break;
1211 }
1212 case MODE_SENSE_6:
1213 {
1214 struct mpd_data {
1215 struct scsi_mode_hdr_6 hd;
1216 struct scsi_mode_block_descr bd;
1217 struct scsi_control_page cp;
1218 } *mpd;
1219 u_int8_t page;
1220
1221 mpd = (struct mpd_data *)ccb->csio.data_ptr;
1222 bzero(mpd, sizeof(struct mpd_data));
1223 mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1224 sizeof(struct scsi_mode_block_descr);
1225 mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1226 mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1227 mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1228 mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1229 mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1230 page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1231 switch (page) {
1232 default:
1233 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1234 break;
1235 }
1236 break;
1237 }
1238 case READ_CAPACITY:
1239 {
1240 struct scsi_read_capacity_data *rcd;
1241
1242 rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1243 bzero(rcd, sizeof(struct scsi_read_capacity_data));
1244 scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1245 scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1246 break;
1247 }
1248 default:
1249 GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1250 ccb->csio.cdb_io.cdb_bytes[0]));
1251 break;
1252 }
1253 ccb->ccb_h.status = CAM_REQ_CMP;
1254 --gdt_stat.io_count_act;
1255 xpt_done(ccb);
1256}
1257
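/*
 * bus_dmamap_load() callback for the gccb array: simply record the bus
 * address of the single segment.
 */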
1258static void
1259gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1260{
1261 bus_addr_t *busaddrp;
1262
1263 busaddrp = (bus_addr_t *)arg;
1264 *busaddrp = dm_segs->ds_addr;
1265}
1266
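/*
 * bus_dmamap_load() callback for SCSI data buffers: copy the DMA
 * segments into the cache or raw service scatter/gather list of the
 * prepared command, sync the buffer, arm the per-command timeout and
 * copy the command into the controller's DPMEM.
 */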
1267static void
1268gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1269{
1270 struct gdt_ccb *gccb;
1271 union ccb *ccb;
1272 struct gdt_softc *gdt;
1273 int i, lock;
1274
1275 lock = splcam();
1276
1277 gccb = (struct gdt_ccb *)arg;
1278 ccb = gccb->gc_ccb;
1279 gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1280
1281 GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1282 gdt, gccb, dm_segs, nseg, error));
1283 gdt_stat.sg_count_act = nseg;
1284 if (nseg > gdt_stat.sg_count_max)
1285 gdt_stat.sg_count_max = nseg;
1286
1287 /* Copy the segments into our SG list */
1288 if (gccb->gc_service == GDT_CACHESERVICE) {
1289 for (i = 0; i < nseg; ++i) {
1290 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1291 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1292 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1293 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1294 dm_segs++;
1295 }
1296 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1297 nseg);
1298 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1299 0xffffffffUL);
1300
1301 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1302 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1303 } else {
1304 for (i = 0; i < nseg; ++i) {
1305 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1306 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1307 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1308 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1309 dm_segs++;
1310 }
1311 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1312 nseg);
1313 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1314 0xffffffffUL);
1315
1316 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1317 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1318 }
1319
1320 if (nseg != 0) {
1321 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1322 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1323 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1324 }
1325
1326 /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
1327 * because the command semaphore is already set!
1328 */
1329
1330 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1331 /* timeout handling */
1332 ccb->ccb_h.timeout_ch =
1333 timeout(iir_timeout, (caddr_t)gccb,
1334 (ccb->ccb_h.timeout * hz) / 1000);
1335
1336 gdt->sc_copy_cmd(gdt, gccb);
1337 splx(lock);
1338}
1339
1340
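/*
 * CAM action entry point.  XPT_SCSI_IO requests are queued and passed
 * to gdt_next(); path inquiry, geometry and transfer setting requests
 * are answered directly; everything else is rejected as invalid or not
 * available.
 */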
1341static void
1342iir_action( struct cam_sim *sim, union ccb *ccb )
1343{
1344 struct gdt_softc *gdt;
1345 int lock, bus, target, lun;
1346
1347 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1348 ccb->ccb_h.ccb_sim_ptr = sim;
1349 bus = cam_sim_bus(sim);
1350 target = ccb->ccb_h.target_id;
1351 lun = ccb->ccb_h.target_lun;
1352 GDT_DPRINTF(GDT_D_CMD,
1353 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1354 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1355 bus, target, lun));
1356 ++gdt_stat.io_count_act;
1357 if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1358 gdt_stat.io_count_max = gdt_stat.io_count_act;
1359
1360 switch (ccb->ccb_h.func_code) {
1361 case XPT_SCSI_IO:
1362 lock = splcam();
1363 TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1364 ++gdt_stat.req_queue_act;
1365 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1366 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1367 splx(lock);
1368 gdt_next(gdt);
1369 break;
1370 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1371 case XPT_ABORT: /* Abort the specified CCB */
1372 /* XXX Implement */
1373 ccb->ccb_h.status = CAM_REQ_INVALID;
1374 --gdt_stat.io_count_act;
1375 xpt_done(ccb);
1376 break;
1377 case XPT_SET_TRAN_SETTINGS:
1378 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1379 --gdt_stat.io_count_act;
1380 xpt_done(ccb);
1381 break;
1382 case XPT_GET_TRAN_SETTINGS:
1383 /* Get default/user set transfer settings for the target */
1384 {
1385 struct ccb_trans_settings *cts;
1386 u_int target_mask;
1387
1388 cts = &ccb->cts;
1389 target_mask = 0x01 << target;
1390 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1391 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
1392 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1393 cts->sync_period = 25; /* 10MHz */
1394 if (cts->sync_period != 0)
1395 cts->sync_offset = 15;
1396
1397 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1398 | CCB_TRANS_SYNC_OFFSET_VALID
1399 | CCB_TRANS_BUS_WIDTH_VALID
1400 | CCB_TRANS_DISC_VALID
1401 | CCB_TRANS_TQ_VALID;
1402 ccb->ccb_h.status = CAM_REQ_CMP;
1403 } else {
1404 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1405 }
1406 --gdt_stat.io_count_act;
1407 xpt_done(ccb);
1408 break;
1409 }
1410 case XPT_CALC_GEOMETRY:
1411 {
1412 struct ccb_calc_geometry *ccg;
1413 u_int32_t secs_per_cylinder;
1414
1415 ccg = &ccb->ccg;
1416 ccg->heads = gdt->sc_hdr[target].hd_heads;
1417 ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1418 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1419 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1420 ccb->ccb_h.status = CAM_REQ_CMP;
1421 --gdt_stat.io_count_act;
1422 xpt_done(ccb);
1423 break;
1424 }
1425 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1426 {
1427 /* XXX Implement */
1428 ccb->ccb_h.status = CAM_REQ_CMP;
1429 --gdt_stat.io_count_act;
1430 xpt_done(ccb);
1431 break;
1432 }
1433 case XPT_TERM_IO: /* Terminate the I/O process */
1434 /* XXX Implement */
1435 ccb->ccb_h.status = CAM_REQ_INVALID;
1436 --gdt_stat.io_count_act;
1437 xpt_done(ccb);
1438 break;
1439 case XPT_PATH_INQ: /* Path routing inquiry */
1440 {
1441 struct ccb_pathinq *cpi = &ccb->cpi;
1442
1443 cpi->version_num = 1;
1444 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1445 cpi->hba_inquiry |= PI_WIDE_16;
1446 cpi->target_sprt = 1;
1447 cpi->hba_misc = 0;
1448 cpi->hba_eng_cnt = 0;
1449 if (bus == gdt->sc_virt_bus)
1450 cpi->max_target = GDT_MAX_HDRIVES - 1;
1451 else if (gdt->sc_class & GDT_FC)
1452 cpi->max_target = GDT_MAXID_FC - 1;
1453 else
1454 cpi->max_target = GDT_MAXID - 1;
1455 cpi->max_lun = 7;
1456 cpi->unit_number = cam_sim_unit(sim);
1457 cpi->bus_id = bus;
1458 cpi->initiator_id =
1459 (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1460 cpi->base_transfer_speed = 3300;
1461 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1461 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1462 if (gdt->sc_vendor == INTEL_VENDOR_ID)
1463 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1464 else
1465 strncpy(cpi->hba_vid, "ICP vortex ", HBA_IDLEN);
1462 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1463 cpi->ccb_h.status = CAM_REQ_CMP;
1464 --gdt_stat.io_count_act;
1465 xpt_done(ccb);
1466 break;
1467 }
1468 default:
1469 GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1470 gdt, ccb->ccb_h.func_code));
1471 ccb->ccb_h.status = CAM_REQ_INVALID;
1472 --gdt_stat.io_count_act;
1473 xpt_done(ccb);
1474 break;
1475 }
1476}
1477
1478static void
1479iir_poll( struct cam_sim *sim )
1480{
1481 struct gdt_softc *gdt;
1482
1483 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1484 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1485 iir_intr(gdt);
1486}
1487
1488static void
1489iir_timeout(void *arg)
1490{
1491 GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1492}
1493
1494static void
1495iir_watchdog(void *arg)
1496{
1497 struct gdt_softc *gdt;
1498
1499 gdt = (struct gdt_softc *)arg;
1500 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1501
1502 {
1503 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1504 struct gdt_ccb *p;
1505 struct ccb_hdr *h;
1506 struct gdt_ucmd *u;
1507
1508 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1509 h = TAILQ_NEXT(h, sim_links.tqe))
1510 ccbs++;
1511 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1512 u = TAILQ_NEXT(u, links))
1513 ucmds++;
1514 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1515 p = SLIST_NEXT(p, sle))
1516 frees++;
1517 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1518 p = SLIST_NEXT(p, sle))
1519 pends++;
1520
1521 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1522 ccbs, ucmds, frees, pends));
1523 }
1524
1525 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1526}
1527
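/*
 * Final shutdown handler: mark the controller as shutting down, wait
 * briefly for pending commands, then queue a cache flush for every
 * present host drive and sleep until each flush completes or times out.
 */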
1528static void
1529iir_shutdown( void *arg, int howto )
1530{
1531 struct gdt_softc *gdt;
1532 struct gdt_ccb *gccb;
1533 gdt_ucmd_t *ucmd;
1534 int lock, i;
1535
1536 gdt = (struct gdt_softc *)arg;
1537 GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1538
1539 printf("iir%d: Flushing all Host Drives. Please wait ... ",
1540 gdt->sc_hanum);
1541
1542 /* allocate ucmd buffer */
1543 ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
1544 if (ucmd == NULL) {
1545 printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
1546 gdt->sc_hanum);
1547 return;
1548 }
1549 bzero(ucmd, sizeof(gdt_ucmd_t));
1550
1551 /* wait for pending IOs */
1552 lock = splcam();
1553 gdt->sc_state = GDT_SHUTDOWN;
1554 splx(lock);
1555 if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1556 (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);
1557
1558 /* flush */
1559 for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1560 if (gdt->sc_hdr[i].hd_present) {
1561 ucmd->service = GDT_CACHESERVICE;
1562 ucmd->OpCode = GDT_FLUSH;
1563 ucmd->u.cache.DeviceNo = i;
1564 lock = splcam();
1565 TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1566 ucmd->complete_flag = FALSE;
1567 splx(lock);
1568 gdt_next(gdt);
1569 if (!ucmd->complete_flag)
1570 (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
1571 }
1572 }
1573
1574 free(ucmd, M_DEVBUF);
1575 printf("Done.\n");
1576}
1577
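/*
 * Interrupt handler.  Fetch the controller status, dispatch async and
 * unknown-service events, and complete the gccb addressed by the
 * returned command index: internal commands just record the status,
 * IOCTL commands copy result data back and wake up the sleeping
 * process, and regular SCSI commands are finished via gdt_sync_event().
 */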
1578void
1579iir_intr(void *arg)
1580{
1581 struct gdt_softc *gdt = arg;
1582 struct gdt_intr_ctx ctx;
1583 int lock = 0;
1584 struct gdt_ccb *gccb;
1585 gdt_ucmd_t *ucmd;
1586 u_int32_t cnt;
1587
1588 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1589
1590 /* If polling and we were not called from gdt_wait, just return */
1591 if ((gdt->sc_state & GDT_POLLING) &&
1592 !(gdt->sc_state & GDT_POLL_WAIT))
1593 return;
1594
1595 if (!(gdt->sc_state & GDT_POLLING))
1596 lock = splcam();
1597 gdt_wait_index = 0;
1598
1599 ctx.istatus = gdt->sc_get_status(gdt);
1600 if (!ctx.istatus) {
1601 if (!(gdt->sc_state & GDT_POLLING))
1602 splx(lock);
1603 gdt->sc_status = GDT_S_NO_STATUS;
1604 return;
1605 }
1606
1607 gdt->sc_intr(gdt, &ctx);
1608
1609 gdt->sc_status = ctx.cmd_status;
1610 gdt->sc_service = ctx.service;
1611 gdt->sc_info = ctx.info;
1612 gdt->sc_info2 = ctx.info2;
1613
1614 if (gdt->sc_state & GDT_POLL_WAIT) {
1615 gdt_wait_gdt = gdt;
1616 gdt_wait_index = ctx.istatus;
1617 }
1618
1619 if (ctx.istatus == GDT_ASYNCINDEX) {
1620 gdt_async_event(gdt, ctx.service);
1621 if (!(gdt->sc_state & GDT_POLLING))
1622 splx(lock);
1623 return;
1624 }
1625 if (ctx.istatus == GDT_SPEZINDEX) {
1626 GDT_DPRINTF(GDT_D_INVALID,
1627 ("iir%d: Service unknown or not initialized!\n",
1628 gdt->sc_hanum));
1629 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1630 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1631 gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1632 if (!(gdt->sc_state & GDT_POLLING))
1633 splx(lock);
1634 return;
1635 }
1636
1637 gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1638 ctx.service = gccb->gc_service;
1639
1640 switch (gccb->gc_flags) {
1641 case GDT_GCF_UNUSED:
1642 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1643 gdt->sc_hanum, ctx.istatus));
1644 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1645 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1646 gdt->sc_dvr.eu.driver.index = ctx.istatus;
1647 gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1648 gdt_free_ccb(gdt, gccb);
1649 /* fallthrough */
1650
1651 case GDT_GCF_INTERNAL:
1652 if (!(gdt->sc_state & GDT_POLLING))
1653 splx(lock);
1654 break;
1655
1656 case GDT_GCF_IOCTL:
1657 ucmd = gccb->gc_ucmd;
1658 if (gdt->sc_status == GDT_S_BSY) {
1659 GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1660 gdt, gccb));
1661 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1662 if (!(gdt->sc_state & GDT_POLLING))
1663 splx(lock);
1664 } else {
1665 ucmd->status = gdt->sc_status;
1666 ucmd->info = gdt->sc_info;
1667 ucmd->complete_flag = TRUE;
1668 if (ucmd->service == GDT_CACHESERVICE) {
1669 if (ucmd->OpCode == GDT_IOCTL) {
1670 cnt = ucmd->u.ioctl.param_size;
1671 if (cnt != 0)
1672 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1673 } else {
1674 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1675 if (cnt != 0)
1676 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1677 }
1678 } else {
1679 cnt = ucmd->u.raw.sdlen;
1680 if (cnt != 0)
1681 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1682 if (ucmd->u.raw.sense_len != 0)
1683 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1684 }
1685 gdt_free_ccb(gdt, gccb);
1686 if (!(gdt->sc_state & GDT_POLLING))
1687 splx(lock);
1688 /* wakeup */
1689 wakeup(ucmd);
1690 }
1691 gdt_next(gdt);
1692 break;
1693
1694 default:
1695 gdt_free_ccb(gdt, gccb);
1696 gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1697 if (!(gdt->sc_state & GDT_POLLING))
1698 splx(lock);
1699 gdt_next(gdt);
1700 break;
1701 }
1702}
1703
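/*
 * Handle an asynchronous controller event.  A screen service message
 * request triggers a GDT_READ command to fetch the message text; all
 * other events are stored in the event buffer and logged on the
 * console.
 */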
1704int
1705gdt_async_event(struct gdt_softc *gdt, int service)
1706{
1707 struct gdt_ccb *gccb;
1708
1709 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1710
1711 if (service == GDT_SCREENSERVICE) {
1712 if (gdt->sc_status == GDT_MSG_REQUEST) {
1713 while (gdt->sc_test_busy(gdt))
1714 DELAY(1);
1715 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1716 gccb = gdt_get_ccb(gdt);
1717 if (gccb == NULL) {
1718 printf("iir%d: No free command index found\n",
1719 gdt->sc_hanum);
1720 return (1);
1721 }
1722 gccb->gc_service = service;
1723 gccb->gc_flags = GDT_GCF_SCREEN;
1724 gdt->sc_set_sema0(gdt);
1725 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1726 gccb->gc_cmd_index);
1727 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1728 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1729 GDT_MSG_INV_HANDLE);
1730 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1731 gdt_ccb_vtop(gdt, gccb) +
1732 offsetof(struct gdt_ccb, gc_scratch[0]));
1733 gdt->sc_cmd_off = 0;
1734 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1735 sizeof(u_int32_t));
1736 gdt->sc_cmd_cnt = 0;
1737 gdt->sc_copy_cmd(gdt, gccb);
1738 printf("iir%d: [PCI %d/%d] ",
1739 gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
1740 gdt->sc_release_event(gdt);
1741 }
1742
1743 } else {
1744 if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1745 gdt->sc_dvr.size = 0;
1746 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1747 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1748 /* severity and event_string already set! */
1749 } else {
1750 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1751 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1752 gdt->sc_dvr.eu.async.service = service;
1753 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1754 gdt->sc_dvr.eu.async.info = gdt->sc_info;
1755 *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
1756 }
1757 gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1758 printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1759 }
1760
1761 return (0);
1762}
1763
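/*
 * Complete a normally queued command.  Screen service messages are
 * printed and, if necessary, acknowledged with further read/write
 * commands.  For cache and raw service commands the timeout is
 * cancelled, the DMA buffer is synced and the CAM status (including
 * autosense data on error) is filled in before xpt_done().
 */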
1764int
1765gdt_sync_event(struct gdt_softc *gdt, int service,
1766 u_int8_t index, struct gdt_ccb *gccb)
1767{
1768 union ccb *ccb;
1769
1770 GDT_DPRINTF(GDT_D_INTR,
1771 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1772
1773 ccb = gccb->gc_ccb;
1774
1775 if (service == GDT_SCREENSERVICE) {
1776 u_int32_t msg_len;
1777
1778 msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1779 if (msg_len)
1780 if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1781 gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1782 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1783 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1784 }
1785
1786 if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1787 !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1788 while (gdt->sc_test_busy(gdt))
1789 DELAY(1);
1790 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1791 gccb = gdt_get_ccb(gdt);
1792 if (gccb == NULL) {
1793 printf("iir%d: No free command index found\n",
1794 gdt->sc_hanum);
1795 return (1);
1796 }
1797 gccb->gc_service = service;
1798 gccb->gc_flags = GDT_GCF_SCREEN;
1799 gdt->sc_set_sema0(gdt);
1800 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1801 gccb->gc_cmd_index);
1802 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1803 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1804 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1805 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1806 gdt_ccb_vtop(gdt, gccb) +
1807 offsetof(struct gdt_ccb, gc_scratch[0]));
1808 gdt->sc_cmd_off = 0;
1809 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1810 sizeof(u_int32_t));
1811 gdt->sc_cmd_cnt = 0;
1812 gdt->sc_copy_cmd(gdt, gccb);
1813 gdt->sc_release_event(gdt);
1814 return (0);
1815 }
1816
1817 if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1818 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1819 /* default answers (getchar() not possible) */
1820 if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1821 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1822 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1823 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1824 } else {
1825 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1826 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1827 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1828 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1829 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1830 }
1831 gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1832 gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1833 while (gdt->sc_test_busy(gdt))
1834 DELAY(1);
1835 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1836 gccb = gdt_get_ccb(gdt);
1837 if (gccb == NULL) {
1838 printf("iir%d: No free command index found\n",
1839 gdt->sc_hanum);
1840 return (1);
1841 }
1842 gccb->gc_service = service;
1843 gccb->gc_flags = GDT_GCF_SCREEN;
1844 gdt->sc_set_sema0(gdt);
1845 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1846 gccb->gc_cmd_index);
1847 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1848 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1849 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1850 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1851 gdt_ccb_vtop(gdt, gccb) +
1852 offsetof(struct gdt_ccb, gc_scratch[0]));
1853 gdt->sc_cmd_off = 0;
1854 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1855 sizeof(u_int32_t));
1856 gdt->sc_cmd_cnt = 0;
1857 gdt->sc_copy_cmd(gdt, gccb);
1858 gdt->sc_release_event(gdt);
1859 return (0);
1860 }
1861 printf("\n");
1862 return (0);
1863 } else {
1864 untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
1865 if (gdt->sc_status == GDT_S_BSY) {
1866 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1867 gdt, gccb));
1868 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1869 ++gdt_stat.req_queue_act;
1870 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1871 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1872 return (2);
1873 }
1874
1875 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1876 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1877 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1878
1879 ccb->csio.resid = 0;
1880 if (gdt->sc_status == GDT_S_OK) {
1881 ccb->ccb_h.status = CAM_REQ_CMP;
1882 } else {
1883 /* error */
1884 if (gccb->gc_service == GDT_CACHESERVICE) {
1885 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1886 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1887 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1888 ccb->csio.sense_data.error_code =
1889 SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
1890 ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
1891
1892 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1893 gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
1894 gdt->sc_dvr.eu.sync.service = service;
1895 gdt->sc_dvr.eu.sync.status = gdt->sc_status;
1896 gdt->sc_dvr.eu.sync.info = gdt->sc_info;
1897 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1898 if (gdt->sc_status >= 0x8000)
1899 gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1900 else
1901 gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1902 } else {
1903 /* raw service */
1904 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1905 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1906 } else {
1907 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1908 ccb->csio.scsi_status = gdt->sc_info;
1909 bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1910 ccb->csio.sense_len);
1911 }
1912 }
1913 }
1914 --gdt_stat.io_count_act;
1915 xpt_done(ccb);
1916 }
1917 return (0);
1918}
1919
1920/* Controller event handling functions */
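/*
 * gdt_store_event() keeps driver, sync and async events in a small ring
 * buffer (ebuffer); identical consecutive events are collapsed into one
 * entry with a repeat count.  gdt_read_event() and gdt_readapp_event()
 * are used by the management interface to read the buffer, and
 * gdt_clear_events() resets it.
 */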
1921gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
1922 gdt_evt_data *evt)
1923{
1924 gdt_evt_str *e;
1925 struct timeval tv;
1926
1927 GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1928 if (source == 0) /* no source -> no event */
1929 return 0;
1930
1931 if (ebuffer[elastidx].event_source == source &&
1932 ebuffer[elastidx].event_idx == idx &&
1933 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1934 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1935 (char *)&evt->eu, evt->size)) ||
1936 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1937 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1938 (char *)&evt->event_string)))) {
1939 e = &ebuffer[elastidx];
1940 getmicrotime(&tv);
1941 e->last_stamp = tv.tv_sec;
1942 ++e->same_count;
1943 } else {
1944 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
1945 ++elastidx;
1946 if (elastidx == GDT_MAX_EVENTS)
1947 elastidx = 0;
1948 if (elastidx == eoldidx) { /* reached mark ? */
1949 ++eoldidx;
1950 if (eoldidx == GDT_MAX_EVENTS)
1951 eoldidx = 0;
1952 }
1953 }
1954 e = &ebuffer[elastidx];
1955 e->event_source = source;
1956 e->event_idx = idx;
1957 getmicrotime(&tv);
1958 e->first_stamp = e->last_stamp = tv.tv_sec;
1959 e->same_count = 1;
1960 e->event_data = *evt;
1961 e->application = 0;
1962 }
1963 return e;
1964}
1965
1966int gdt_read_event(int handle, gdt_evt_str *estr)
1967{
1968 gdt_evt_str *e;
1969 int eindex, lock;
1970
1971 GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1972 lock = splcam();
1973 if (handle == -1)
1974 eindex = eoldidx;
1975 else
1976 eindex = handle;
1977 estr->event_source = 0;
1978
1979 if (eindex >= GDT_MAX_EVENTS) {
1980 splx(lock);
1981 return eindex;
1982 }
1983 e = &ebuffer[eindex];
1984 if (e->event_source != 0) {
1985 if (eindex != elastidx) {
1986 if (++eindex == GDT_MAX_EVENTS)
1987 eindex = 0;
1988 } else {
1989 eindex = -1;
1990 }
1991 memcpy(estr, e, sizeof(gdt_evt_str));
1992 }
1993 splx(lock);
1994 return eindex;
1995}
1996
1997void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1998{
1999 gdt_evt_str *e;
2000 int found = FALSE;
2001 int eindex, lock;
2002
2003 GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
2004 lock = splcam();
2005 eindex = eoldidx;
2006 for (;;) {
2007 e = &ebuffer[eindex];
2008 if (e->event_source == 0)
2009 break;
2010 if ((e->application & application) == 0) {
2011 e->application |= application;
2012 found = TRUE;
2013 break;
2014 }
2015 if (eindex == elastidx)
2016 break;
2017 if (++eindex == GDT_MAX_EVENTS)
2018 eindex = 0;
2019 }
2020 if (found)
2021 memcpy(estr, e, sizeof(gdt_evt_str));
2022 else
2023 estr->event_source = 0;
2024 splx(lock);
2025}
2026
2027void gdt_clear_events()
2028{
2029 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2030
2031 eoldidx = elastidx = 0;
2032 ebuffer[0].event_source = 0;
2033}
1466 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1467 cpi->ccb_h.status = CAM_REQ_CMP;
1468 --gdt_stat.io_count_act;
1469 xpt_done(ccb);
1470 break;
1471 }
1472 default:
1473 GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1474 gdt, ccb->ccb_h.func_code));
1475 ccb->ccb_h.status = CAM_REQ_INVALID;
1476 --gdt_stat.io_count_act;
1477 xpt_done(ccb);
1478 break;
1479 }
1480}
1481
1482static void
1483iir_poll( struct cam_sim *sim )
1484{
1485 struct gdt_softc *gdt;
1486
1487 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1488 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1489 iir_intr(gdt);
1490}
1491
1492static void
1493iir_timeout(void *arg)
1494{
1495 GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1496}
1497
1498static void
1499iir_watchdog(void *arg)
1500{
1501 struct gdt_softc *gdt;
1502
1503 gdt = (struct gdt_softc *)arg;
1504 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1505
1506 {
1507 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1508 struct gdt_ccb *p;
1509 struct ccb_hdr *h;
1510 struct gdt_ucmd *u;
1511
1512 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1513 h = TAILQ_NEXT(h, sim_links.tqe))
1514 ccbs++;
1515 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1516 u = TAILQ_NEXT(u, links))
1517 ucmds++;
1518 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1519 p = SLIST_NEXT(p, sle))
1520 frees++;
1521 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1522 p = SLIST_NEXT(p, sle))
1523 pends++;
1524
1525 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1526 ccbs, ucmds, frees, pends));
1527 }
1528
1529 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1530}
1531
1532static void
1533iir_shutdown( void *arg, int howto )
1534{
1535 struct gdt_softc *gdt;
1536 struct gdt_ccb *gccb;
1537 gdt_ucmd_t *ucmd;
1538 int lock, i;
1539
1540 gdt = (struct gdt_softc *)arg;
1541 GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1542
1543 printf("iir%d: Flushing all Host Drives. Please wait ... ",
1544 gdt->sc_hanum);
1545
1546 /* allocate ucmd buffer */
1547 ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
1548 if (ucmd == NULL) {
1549 printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
1550 gdt->sc_hanum);
1551 return;
1552 }
1553 bzero(ucmd, sizeof(gdt_ucmd_t));
1554
1555 /* wait for pending IOs */
1556 lock = splcam();
1557 gdt->sc_state = GDT_SHUTDOWN;
1558 splx(lock);
1559 if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1560 (void) tsleep((void *)gccb, PCATCH | PRIBIO, "iirshw", 100 * hz);
1561
1562 /* flush */
1563 for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1564 if (gdt->sc_hdr[i].hd_present) {
1565 ucmd->service = GDT_CACHESERVICE;
1566 ucmd->OpCode = GDT_FLUSH;
1567 ucmd->u.cache.DeviceNo = i;
1568 lock = splcam();
1569 TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1570 ucmd->complete_flag = FALSE;
1571 splx(lock);
1572 gdt_next(gdt);
1573 if (!ucmd->complete_flag)
1574 (void) tsleep((void *)ucmd, PCATCH|PRIBIO, "iirshw", 10*hz);
1575 }
1576 }
1577
1578 free(ucmd, M_DEVBUF);
1579 printf("Done.\n");
1580}
1581
1582void
1583iir_intr(void *arg)
1584{
1585 struct gdt_softc *gdt = arg;
1586 struct gdt_intr_ctx ctx;
1587 int lock = 0;
1588 struct gdt_ccb *gccb;
1589 gdt_ucmd_t *ucmd;
1590 u_int32_t cnt;
1591
1592 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1593
1594 /* If polling and we were not called from gdt_wait, just return */
1595 if ((gdt->sc_state & GDT_POLLING) &&
1596 !(gdt->sc_state & GDT_POLL_WAIT))
1597 return;
1598
1599 if (!(gdt->sc_state & GDT_POLLING))
1600 lock = splcam();
1601 gdt_wait_index = 0;
1602
1603 ctx.istatus = gdt->sc_get_status(gdt);
1604 if (!ctx.istatus) {
1605 if (!(gdt->sc_state & GDT_POLLING))
1606 splx(lock);
1607 gdt->sc_status = GDT_S_NO_STATUS;
1608 return;
1609 }
1610
1611 gdt->sc_intr(gdt, &ctx);
1612
1613 gdt->sc_status = ctx.cmd_status;
1614 gdt->sc_service = ctx.service;
1615 gdt->sc_info = ctx.info;
1616 gdt->sc_info2 = ctx.info2;
1617
1618 if (gdt->sc_state & GDT_POLL_WAIT) {
1619 gdt_wait_gdt = gdt;
1620 gdt_wait_index = ctx.istatus;
1621 }
1622
1623 if (ctx.istatus == GDT_ASYNCINDEX) {
1624 gdt_async_event(gdt, ctx.service);
1625 if (!(gdt->sc_state & GDT_POLLING))
1626 splx(lock);
1627 return;
1628 }
1629 if (ctx.istatus == GDT_SPEZINDEX) {
1630 GDT_DPRINTF(GDT_D_INVALID,
1631 ("iir%d: Service unknown or not initialized!\n",
1632 gdt->sc_hanum));
1633 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1634 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1635 gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1636 if (!(gdt->sc_state & GDT_POLLING))
1637 splx(lock);
1638 return;
1639 }
1640
1641 gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1642 ctx.service = gccb->gc_service;
1643
1644 switch (gccb->gc_flags) {
1645 case GDT_GCF_UNUSED:
1646 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1647 gdt->sc_hanum, ctx.istatus));
1648 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1649 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1650 gdt->sc_dvr.eu.driver.index = ctx.istatus;
1651 gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1652 gdt_free_ccb(gdt, gccb);
1653 /* fallthrough */
1654
1655 case GDT_GCF_INTERNAL:
1656 if (!(gdt->sc_state & GDT_POLLING))
1657 splx(lock);
1658 break;
1659
1660 case GDT_GCF_IOCTL:
1661 ucmd = gccb->gc_ucmd;
1662 if (gdt->sc_status == GDT_S_BSY) {
1663 GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1664 gdt, gccb));
1665 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1666 if (!(gdt->sc_state & GDT_POLLING))
1667 splx(lock);
1668 } else {
1669 ucmd->status = gdt->sc_status;
1670 ucmd->info = gdt->sc_info;
1671 ucmd->complete_flag = TRUE;
1672 if (ucmd->service == GDT_CACHESERVICE) {
1673 if (ucmd->OpCode == GDT_IOCTL) {
1674 cnt = ucmd->u.ioctl.param_size;
1675 if (cnt != 0)
1676 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1677 } else {
1678 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1679 if (cnt != 0)
1680 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1681 }
1682 } else {
1683 cnt = ucmd->u.raw.sdlen;
1684 if (cnt != 0)
1685 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1686 if (ucmd->u.raw.sense_len != 0)
1687 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1688 }
1689 gdt_free_ccb(gdt, gccb);
1690 if (!(gdt->sc_state & GDT_POLLING))
1691 splx(lock);
1692            /* wake up the process sleeping on this ucmd */
1693 wakeup(ucmd);
1694 }
1695 gdt_next(gdt);
1696 break;
1697
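    /* Regular CAM request: complete the CCB via gdt_sync_event(). */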
1698 default:
1699 gdt_free_ccb(gdt, gccb);
1700 gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1701 if (!(gdt->sc_state & GDT_POLLING))
1702 splx(lock);
1703 gdt_next(gdt);
1704 break;
1705 }
1706}
1707
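/*
 * Handle an asynchronous controller event.  A screen service message
 * request is answered with a READ of the controller's message buffer;
 * for all other services the event is stored in the driver's event
 * buffer and printed on the console.
 */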
1708int
1709gdt_async_event(struct gdt_softc *gdt, int service)
1710{
1711 struct gdt_ccb *gccb;
1712
1713 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1714
1715 if (service == GDT_SCREENSERVICE) {
1716 if (gdt->sc_status == GDT_MSG_REQUEST) {
1717 while (gdt->sc_test_busy(gdt))
1718 DELAY(1);
1719 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1720 gccb = gdt_get_ccb(gdt);
1721 if (gccb == NULL) {
1722 printf("iir%d: No free command index found\n",
1723 gdt->sc_hanum);
1724 return (1);
1725 }
1726 gccb->gc_service = service;
1727 gccb->gc_flags = GDT_GCF_SCREEN;
1728 gdt->sc_set_sema0(gdt);
1729 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1730 gccb->gc_cmd_index);
1731 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1732 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1733 GDT_MSG_INV_HANDLE);
1734 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1735 gdt_ccb_vtop(gdt, gccb) +
1736 offsetof(struct gdt_ccb, gc_scratch[0]));
1737 gdt->sc_cmd_off = 0;
1738 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1739 sizeof(u_int32_t));
1740 gdt->sc_cmd_cnt = 0;
1741 gdt->sc_copy_cmd(gdt, gccb);
1742 printf("iir%d: [PCI %d/%d] ",
1743 gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
1744 gdt->sc_release_event(gdt);
1745 }
1746
1747 } else {
1748 if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1749 gdt->sc_dvr.size = 0;
1750 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1751 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1752 /* severity and event_string already set! */
1753 } else {
1754 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1755 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1756 gdt->sc_dvr.eu.async.service = service;
1757 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1758 gdt->sc_dvr.eu.async.info = gdt->sc_info;
1759 *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
1760 }
1761 gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1762 printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1763 }
1764
1765 return (0);
1766}
1767
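/*
 * Complete a synchronous request.  Screen service completions print the
 * message text and, for extended or answer messages, issue the follow-up
 * READ/WRITE command.  All other completions finish the associated CAM
 * CCB, translating the controller status into CAM status and sense data.
 * Returns 2 if the request had to be requeued because the controller was
 * busy, 1 if no free command index was available, 0 otherwise.
 */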
1768int
1769gdt_sync_event(struct gdt_softc *gdt, int service,
1770 u_int8_t index, struct gdt_ccb *gccb)
1771{
1772 union ccb *ccb;
1773
1774 GDT_DPRINTF(GDT_D_INTR,
1775 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1776
1777 ccb = gccb->gc_ccb;
1778
1779 if (service == GDT_SCREENSERVICE) {
1780 u_int32_t msg_len;
1781
1782 msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1783 if (msg_len)
1784 if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1785 gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1786 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1787 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1788 }
1789
1790 if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1791 !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1792 while (gdt->sc_test_busy(gdt))
1793 DELAY(1);
1794 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1795 gccb = gdt_get_ccb(gdt);
1796 if (gccb == NULL) {
1797 printf("iir%d: No free command index found\n",
1798 gdt->sc_hanum);
1799 return (1);
1800 }
1801 gccb->gc_service = service;
1802 gccb->gc_flags = GDT_GCF_SCREEN;
1803 gdt->sc_set_sema0(gdt);
1804 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1805 gccb->gc_cmd_index);
1806 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1807 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1808 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1809 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1810 gdt_ccb_vtop(gdt, gccb) +
1811 offsetof(struct gdt_ccb, gc_scratch[0]));
1812 gdt->sc_cmd_off = 0;
1813 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1814 sizeof(u_int32_t));
1815 gdt->sc_cmd_cnt = 0;
1816 gdt->sc_copy_cmd(gdt, gccb);
1817 gdt->sc_release_event(gdt);
1818 return (0);
1819 }
1820
1821 if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1822 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1823 /* default answers (getchar() not possible) */
1824 if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1825 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1826 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1827 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1828 } else {
1829 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1830 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1831 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1832 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1833 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1834 }
1835 gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1836 gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1837 while (gdt->sc_test_busy(gdt))
1838 DELAY(1);
1839 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1840 gccb = gdt_get_ccb(gdt);
1841 if (gccb == NULL) {
1842 printf("iir%d: No free command index found\n",
1843 gdt->sc_hanum);
1844 return (1);
1845 }
1846 gccb->gc_service = service;
1847 gccb->gc_flags = GDT_GCF_SCREEN;
1848 gdt->sc_set_sema0(gdt);
1849 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1850 gccb->gc_cmd_index);
1851 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1852 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1853 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1854 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1855 gdt_ccb_vtop(gdt, gccb) +
1856 offsetof(struct gdt_ccb, gc_scratch[0]));
1857 gdt->sc_cmd_off = 0;
1858 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1859 sizeof(u_int32_t));
1860 gdt->sc_cmd_cnt = 0;
1861 gdt->sc_copy_cmd(gdt, gccb);
1862 gdt->sc_release_event(gdt);
1863 return (0);
1864 }
1865 printf("\n");
1866 return (0);
1867 } else {
1868 untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
1869 if (gdt->sc_status == GDT_S_BSY) {
1870 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1871 gdt, gccb));
1872 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1873 ++gdt_stat.req_queue_act;
1874 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1875 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1876 return (2);
1877 }
1878
1879 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap,
1880 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
1881 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1882
1883 ccb->csio.resid = 0;
1884 if (gdt->sc_status == GDT_S_OK) {
1885 ccb->ccb_h.status = CAM_REQ_CMP;
1886 } else {
1887 /* error */
1888 if (gccb->gc_service == GDT_CACHESERVICE) {
1889 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1890 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1891 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1892 ccb->csio.sense_data.error_code =
1893 SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
1894 ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
1895
1896 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1897 gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
1898 gdt->sc_dvr.eu.sync.service = service;
1899 gdt->sc_dvr.eu.sync.status = gdt->sc_status;
1900 gdt->sc_dvr.eu.sync.info = gdt->sc_info;
1901 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1902 if (gdt->sc_status >= 0x8000)
1903 gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1904 else
1905 gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1906 } else {
1907 /* raw service */
1908 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1909 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1910 } else {
1911 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1912 ccb->csio.scsi_status = gdt->sc_info;
1913 bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1914 ccb->csio.sense_len);
1915 }
1916 }
1917 }
1918 --gdt_stat.io_count_act;
1919 xpt_done(ccb);
1920 }
1921 return (0);
1922}
1923
1924/* Controller event handling functions */
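/*
 * Store an event in the global ring buffer.  A repetition of the most
 * recent event only bumps its same_count and last_stamp; otherwise the
 * next slot is used, overwriting the oldest entry when the buffer wraps.
 */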
1925gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
1926 gdt_evt_data *evt)
1927{
1928 gdt_evt_str *e;
1929 struct timeval tv;
1930
1931 GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1932 if (source == 0) /* no source -> no event */
1933 return 0;
1934
1935 if (ebuffer[elastidx].event_source == source &&
1936 ebuffer[elastidx].event_idx == idx &&
1937 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1938 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1939 (char *)&evt->eu, evt->size)) ||
1940 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1941 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1942 (char *)&evt->event_string)))) {
1943 e = &ebuffer[elastidx];
1944 getmicrotime(&tv);
1945 e->last_stamp = tv.tv_sec;
1946 ++e->same_count;
1947 } else {
1948 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
1949 ++elastidx;
1950 if (elastidx == GDT_MAX_EVENTS)
1951 elastidx = 0;
1952 if (elastidx == eoldidx) { /* reached mark ? */
1953 ++eoldidx;
1954 if (eoldidx == GDT_MAX_EVENTS)
1955 eoldidx = 0;
1956 }
1957 }
1958 e = &ebuffer[elastidx];
1959 e->event_source = source;
1960 e->event_idx = idx;
1961 getmicrotime(&tv);
1962 e->first_stamp = e->last_stamp = tv.tv_sec;
1963 e->same_count = 1;
1964 e->event_data = *evt;
1965 e->application = 0;
1966 }
1967 return e;
1968}
1969
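/*
 * Read one event from the buffer.  handle selects the entry (-1 means
 * the oldest); the return value is the handle for the next call, or -1
 * once the newest entry has been delivered.  estr->event_source is 0
 * when no event was available.
 *
 * A minimal sketch of a consumer loop (hypothetical caller):
 *
 *	int h = -1;
 *	gdt_evt_str e;
 *	for (;;) {
 *		h = gdt_read_event(h, &e);
 *		if (e.event_source == 0)
 *			break;		(buffer empty)
 *		...consume e...
 *		if (h == -1)
 *			break;		(newest entry read)
 *	}
 */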
1970int gdt_read_event(int handle, gdt_evt_str *estr)
1971{
1972 gdt_evt_str *e;
1973 int eindex, lock;
1974
1975 GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1976 lock = splcam();
1977 if (handle == -1)
1978 eindex = eoldidx;
1979 else
1980 eindex = handle;
1981 estr->event_source = 0;
1982
1983 if (eindex >= GDT_MAX_EVENTS) {
1984 splx(lock);
1985 return eindex;
1986 }
1987 e = &ebuffer[eindex];
1988 if (e->event_source != 0) {
1989 if (eindex != elastidx) {
1990 if (++eindex == GDT_MAX_EVENTS)
1991 eindex = 0;
1992 } else {
1993 eindex = -1;
1994 }
1995 memcpy(estr, e, sizeof(gdt_evt_str));
1996 }
1997 splx(lock);
1998 return eindex;
1999}
2000
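/*
 * Deliver the oldest event not yet seen by the given application bit,
 * marking it as read for that application; estr->event_source is set
 * to 0 if there is none.
 */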
2001void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
2002{
2003 gdt_evt_str *e;
2004 int found = FALSE;
2005 int eindex, lock;
2006
2007 GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
2008 lock = splcam();
2009 eindex = eoldidx;
2010 for (;;) {
2011 e = &ebuffer[eindex];
2012 if (e->event_source == 0)
2013 break;
2014 if ((e->application & application) == 0) {
2015 e->application |= application;
2016 found = TRUE;
2017 break;
2018 }
2019 if (eindex == elastidx)
2020 break;
2021 if (++eindex == GDT_MAX_EVENTS)
2022 eindex = 0;
2023 }
2024 if (found)
2025 memcpy(estr, e, sizeof(gdt_evt_str));
2026 else
2027 estr->event_source = 0;
2028 splx(lock);
2029}
2030
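/*
 * Reset the event buffer to the empty state.
 */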
2031void gdt_clear_events(void)
2032{
2033 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2034
2035 eoldidx = elastidx = 0;
2036 ebuffer[0].event_source = 0;
2037}