/*	$NetBSD: icp.c,v 1.17 2005/12/11 12:21:27 christos Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it were not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.17 2005/12/11 12:21:27 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int	icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

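/*
 * Initialize the controller: allocate the scratch DMA area and the CCBs,
 * initialize the firmware services (screen, cache, raw SCSI), probe the
 * attached host drives and channels, and attach child devices.  Returns
 * 0 on success, non-zero on failure.
 */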
int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

	state = 0;

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error("%s: cannot create scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error("%s: cannot alloc scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error("%s: cannot map scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: cannot load scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: screen service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error("%s: cache service mount error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service post-mount init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: raw service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (raw service)\n",
		    icp->icp_dv.dv_xname);
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (cache service)\n",
		    icp->icp_dv.dv_xname);
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error("%s: unable to retrieve board info\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal(
	    "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error(
					    "%s: unable to get chan info\n",
					    icp->icp_dv.dv_xname);
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found_sm_loc(&icp->icp_dv, "icp", locs,
				&icpa, icp_print, config_stdsubmatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	icp_count++;

	return (0);

 bail_out:
	if (state > 4)
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
	if (state > 3)
		free(icp->icp_ccbs, M_DEVBUF);
	if (state > 2)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 0)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

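/*
 * Register the openings callback for the child device attached at the
 * given unit number.
 */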
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

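/*
 * Rescan a single cache service host drive: query its current size and
 * type, and attach or detach the corresponding child device if the drive
 * has appeared, disappeared, or changed.
 */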
void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and that no
	 * commands be running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    icp->icp_dv.dv_xname, unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    icp->icp_dv.dv_xname, unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    icp->icp_dv.dv_xname, unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
			"icp", locs, &icpa, icp_print, config_stdsubmatch);
	}

	icp_recompute_openings(icp);
}

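/*
 * Rescan all host drives: re-initialize the cache service to obtain the
 * current drive count, rescan each drive in turn, and detach children
 * for any drives that have gone away.
 */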
void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    icp->icp_dv.dv_xname);
		return;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
			    icp->icp_dv.dv_xname, unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

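/*
 * Re-distribute the CCBs among the attached devices: each device gets an
 * equal share of the usable CCBs (less a small reserve), and each child's
 * openings callback is notified of the new value.
 */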
void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    icp->icp_dv.dv_xname, icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

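/*
 * Watchdog: periodically poll the interrupt handler (to pick up any
 * completions whose interrupts were missed), restart the command queue
 * if work is pending, and re-arm the callout.
 */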
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

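/*
 * Autoconfiguration glue: print the location of a child device at
 * attach time.
 */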
int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

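/*
 * Handle an asynchronous event posted by the controller, recording it
 * in the event buffer for the management tools to read.
 */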
int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", icp->icp_dv.dv_xname,
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

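/*
 * Interrupt handler: fetch the controller status, dispatch asynchronous
 * and special events, and complete the CCB identified by the returned
 * command index.
 */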
int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		printf("%s: uninitialized or unknown service (%d/%d)\n",
		    icp->icp_dv.dv_xname, ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = icp->icp_dv.dv_unit;
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

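/*
 * Completion handler for user (ioctl) commands: copy status and any
 * returned data back to the gdt_ucmd_t, release the shared scratch
 * area, and wake the waiter.
 */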
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * Issue an internal command to one of the firmware services and poll
 * for its completion, retrying a failed command a few times.
 *
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

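/*
 * Execute a command on behalf of the management tools (ioctl path).
 * The command's data, if any, is staged through the shared scratch
 * area, so its size is checked against GDT_SCRATCH_SZ first.
 */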
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			printf("%s: scratch buffer too small (%d/%d)\n",
			    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		printf("%s: error %d waiting for ucmd to complete\n",
		    icp->icp_dv.dv_xname, error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

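/*
 * Allocate a CCB from the free list, or return NULL if none are
 * available.
 */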
struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

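/*
 * Allocate a CCB from the free list, sleeping until one becomes
 * available if necessary.
 */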
struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

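/*
 * Return a CCB to the free list, waking anybody waiting for one.
 */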
void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

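/*
 * Enqueue the specified CCB (if any) and process the command queue,
 * submitting commands to the controller until it is busy or the queue
 * is frozen.  User commands take priority over normal commands.
 */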
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	while (icp->icp_qfreeze == 0) {
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy(icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

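/*
 * Map a data transfer for the given CCB: load the DMA map, fill in the
 * scatter/gather list (if the CCB has one), and synchronize the buffer
 * for the transfer direction.
 */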
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

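/*
 * Unmap a completed data transfer: synchronize and unload the DMA map.
 */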
void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

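/*
 * Submit a CCB and wait for it to complete.  While the system is still
 * cold we busy-wait, calling the interrupt handler by hand; otherwise
 * we sleep on the CCB.  The timeout is in milliseconds.
 */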
int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
					 mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    icp->icp_dv.dv_xname, ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

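/*
 * Enqueue a CCB and sleep until it completes or the timeout (in
 * milliseconds) expires.
 */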
int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

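/*
 * Like icp_ccb_wait(), but for user (ioctl) commands: the CCB is marked
 * IC_UCMD and icp_ucmd_intr() runs at completion to copy the results
 * back before the waiter is woken.
 */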
int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = &icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

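/*
 * Hand a CCB to the controller: round the command length up, set
 * semaphore 0, bump the running command count, then copy the command
 * out and signal the controller via the bus-specific hooks.
 */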
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

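/*
 * Freeze the command queue and wait for all running commands to drain,
 * e.g. before a rescan.  Undone by icp_unfreeze().
 */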
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

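/*
 * Thaw the command queue and restart command submission if work is
 * pending.
 */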
void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

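/*
 * Record an event in the circular event buffer.  A repeat of the most
 * recent event only bumps its count and timestamp; otherwise a new
 * entry is taken, overwriting the oldest one if the buffer is full.
 */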
1308icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
1309    gdt_evt_data *evt)
1310{
1311	gdt_evt_str *e;
1312
1313	/* no source == no event */
1314	if (source == 0)
1315		return (NULL);
1316
1317	e = &icp_event_buffer[icp_event_lastidx];
1318	if (e->event_source == source && e->event_idx == idx &&
1319	    ((evt->size != 0 && e->event_data.size != 0 &&
1320	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
1321	     (evt->size == 0 && e->event_data.size == 0 &&
1322	      strcmp((char *) e->event_data.event_string,
1323	      	     (char *) evt->event_string) == 0))) {
1324		e->last_stamp = time.tv_sec;
1325		e->same_count++;
1326	} else {
1327		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
1328			icp_event_lastidx++;
1329			if (icp_event_lastidx == ICP_MAX_EVENTS)
1330				icp_event_lastidx = 0;
1331			if (icp_event_lastidx == icp_event_oldidx) {
1332				icp_event_oldidx++;
1333				if (icp_event_oldidx == ICP_MAX_EVENTS)
1334					icp_event_oldidx = 0;
1335			}
1336		}
1337		e = &icp_event_buffer[icp_event_lastidx];
1338		e->event_source = source;
1339		e->event_idx = idx;
1340		e->first_stamp = e->last_stamp = time.tv_sec;
1341		e->same_count = 1;
1342		e->event_data = *evt;
1343		e->application = 0;
1344	}
1345	return (e);
1346}
1347
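/*
 * Read one event from the buffer.  A handle of -1 starts at the oldest
 * entry; the return value is the handle of the next entry, or -1 once
 * the newest entry has been read.
 */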
int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

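/*
 * Find the first event that the given application has not yet seen,
 * mark it as seen by that application, and copy it out.  If there is
 * no unread event, estr->event_source is set to 0.
 */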
void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

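/*
 * Reset the event buffer to its empty state.
 */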
void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}