1/*	$NetBSD: icp.c,v 1.37 2022/04/10 09:50:45 andvar Exp $	*/
2
3/*-
4 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32/*
33 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 *    notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 *    notice, this list of conditions and the following disclaimer in the
42 *    documentation and/or other materials provided with the distribution.
43 * 3. All advertising materials mentioning features or use of this software
44 *    must display the following acknowledgement:
45 *	This product includes software developed by Niklas Hallqvist.
46 * 4. The name of the author may not be used to endorse or promote products
47 *    derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
61 */
62
63/*
64 * This driver would not have written if it was not for the hardware donations
 * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
66 *
67 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
68 * Intel.
69 *
70 * Support for the ICP-Vortex management tools added by
71 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
72 * provided by Achim Leubner <achim.leubner@intel.com>.
73 *
74 * Additional support for dynamic rescan of cacheservice drives by
75 * Jason R. Thorpe of Wasabi Systems, Inc.
76 */
77
78#include <sys/cdefs.h>
79__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.37 2022/04/10 09:50:45 andvar Exp $");
80
81#include <sys/param.h>
82#include <sys/systm.h>
83#include <sys/kernel.h>
84#include <sys/device.h>
85#include <sys/queue.h>
86#include <sys/proc.h>
87#include <sys/buf.h>
88#include <sys/endian.h>
89#include <sys/malloc.h>
90#include <sys/disk.h>
91
92#include <sys/bswap.h>
93#include <sys/bus.h>
94
95#include <dev/pci/pcireg.h>
96#include <dev/pci/pcivar.h>
97#include <dev/pci/pcidevs.h>
98
99#include <dev/ic/icpreg.h>
100#include <dev/ic/icpvar.h>
101
102#include <dev/scsipi/scsipi_all.h>
103#include <dev/scsipi/scsiconf.h>
104
105#include "locators.h"
106
107int	icp_async_event(struct icp_softc *, int);
108void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
109void	icp_chain(struct icp_softc *);
110int	icp_print(void *, const char *);
111void	icp_watchdog(void *);
112void	icp_ucmd_intr(struct icp_ccb *);
113void	icp_recompute_openings(struct icp_softc *);
114
115int	icp_count;	/* total # of controllers, for ioctl interface */
116
117/*
118 * Statistics for the ioctl interface to query.
119 *
120 * XXX Global.  They should probably be made per-controller
121 * XXX at some point.
122 */
123gdt_statist_t icp_stats;
124
125int
126icp_init(struct icp_softc *icp, const char *intrstr)
127{
128	struct icp_attach_args icpa;
129	struct icp_binfo binfo;
130	struct icp_ccb *ic;
131	u_int16_t cdev_cnt;
132	int i, j, state, feat, nsegs, rv;
133	int locs[ICPCF_NLOCS];
134
135	state = 0;
136
137	if (intrstr != NULL)
138		aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
139		    intrstr);
140
141	SIMPLEQ_INIT(&icp->icp_ccb_queue);
142	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
143	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
144	callout_init(&icp->icp_wdog_callout, 0);
145
146	/*
147	 * Allocate a scratch area.
148	 */
149	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
150	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
151	    &icp->icp_scr_dmamap) != 0) {
152		aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
153		return (1);
154	}
155	state++;
156
157	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
158	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
159		aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
160		goto bail_out;
161	}
162	state++;
163
164	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
165	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
166		aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
167		goto bail_out;
168	}
169	state++;
170
171	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
172	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
173		aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
174		goto bail_out;
175	}
176	state++;
177
178	/*
179	 * Allocate and initialize the command control blocks.
180	 */
181	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_WAITOK | M_ZERO);
182	icp->icp_ccbs = ic;
183	state++;
184
185	for (i = 0; i < ICP_NCCBS; i++, ic++) {
186		/*
187		 * The first two command indexes have special meanings, so
188		 * we can't use them.
189		 */
190		ic->ic_ident = i + 2;
191		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
192		    ICP_MAXSG, ICP_MAX_XFER, 0,
193		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
194		    &ic->ic_xfer_map);
195		if (rv != 0)
196			break;
197		icp->icp_nccbs++;
198		icp_ccb_free(icp, ic);
199	}
200#ifdef DIAGNOSTIC
201	if (icp->icp_nccbs != ICP_NCCBS)
202		aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
203		    icp->icp_nccbs, ICP_NCCBS);
204#endif
205
206	/*
207	 * Initialize the controller.
208	 */
209	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
210		aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
211		    icp->icp_status);
212		goto bail_out;
213	}
214
215	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
216		aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
217		    icp->icp_status);
218		goto bail_out;
219	}
220
221	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
222
223	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
224		aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
225		    icp->icp_status);
226		goto bail_out;
227	}
228
229	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
230		aprint_error_dev(icp->icp_dv, "cache service post-mount init error %d\n",
231		    icp->icp_status);
232		goto bail_out;
233	}
234	cdev_cnt = (u_int16_t)icp->icp_info;
235	icp->icp_fw_vers = icp->icp_service;
236
237	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
238		aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
239		    icp->icp_status);
240		goto bail_out;
241	}
242
243	/*
244	 * Set/get raw service features (scatter/gather).
245	 */
246	feat = 0;
247	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
248	    0, 0))
249		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
250			feat = icp->icp_info;
251
252	if ((feat & ICP_SCATTER_GATHER) == 0) {
253#ifdef DIAGNOSTIC
254		aprint_normal_dev(icp->icp_dv,
255		    "scatter/gather not supported (raw service)\n");
256#endif
257	} else
258		icp->icp_features |= ICP_FEAT_RAWSERVICE;
259
260	/*
261	 * Set/get cache service features (scatter/gather).
262	 */
263	feat = 0;
264	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
265	    ICP_SCATTER_GATHER, 0))
266		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
267			feat = icp->icp_info;
268
269	if ((feat & ICP_SCATTER_GATHER) == 0) {
270#ifdef DIAGNOSTIC
271		aprint_normal_dev(icp->icp_dv,
272		    "scatter/gather not supported (cache service)\n");
273#endif
274	} else
275		icp->icp_features |= ICP_FEAT_CACHESERVICE;
276
277	/*
278	 * Pull some information from the board and dump.
279	 */
280	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
281	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
282		aprint_error_dev(icp->icp_dv, "unable to retrieve board info\n");
283		goto bail_out;
284	}
285	memcpy(&binfo, icp->icp_scr, sizeof(binfo));
286
287	aprint_normal_dev(icp->icp_dv,
288	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
289	    binfo.bi_type_string, binfo.bi_raid_string,
290	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);
291
292	/*
293	 * Determine the number of devices, and number of openings per
294	 * device.
295	 */
296	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
297		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
298			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
299			    0))
300				continue;
301
302			icp->icp_cdr[j].cd_size = icp->icp_info;
303			if (icp->icp_cdr[j].cd_size != 0)
304				icp->icp_ndevs++;
305
306			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
307			    0))
308				icp->icp_cdr[j].cd_type = icp->icp_info;
309		}
310	}
311
312	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
313		icp->icp_nchan = binfo.bi_chan_count;
314		icp->icp_ndevs += icp->icp_nchan;
315	}
316
317	icp_recompute_openings(icp);
318
319	/*
320	 * Attach SCSI channels.
321	 */
322	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
323		struct icp_ioc_version *iv;
324		struct icp_rawioc *ri;
325		struct icp_getch *gc;
326
327		iv = (struct icp_ioc_version *)icp->icp_scr;
328		iv->iv_version = htole32(ICP_IOC_NEWEST);
329		iv->iv_listents = ICP_MAXBUS;
330		iv->iv_firstchan = 0;
331		iv->iv_lastchan = ICP_MAXBUS - 1;
332		iv->iv_listoffset = htole32(sizeof(*iv));
333
334		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
335		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
336		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
337			ri = (struct icp_rawioc *)(iv + 1);
338			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
339				icp->icp_bus_id[j] = ri->ri_procid;
340		} else {
341			/*
342			 * Fall back to the old method.
343			 */
344			gc = (struct icp_getch *)icp->icp_scr;
345
346			for (j = 0; j < binfo.bi_chan_count; j++) {
347				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
348				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
349				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
350				    sizeof(*gc))) {
351				    	aprint_error_dev(icp->icp_dv,
352					    "unable to get chan info");
353					goto bail_out;
354				}
355				icp->icp_bus_id[j] = gc->gc_scsiid;
356			}
357		}
358
359		for (j = 0; j < binfo.bi_chan_count; j++) {
360			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
361				icp->icp_bus_id[j] = ICP_MAXID_FC;
362
363			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
364
365			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;
366
367			icp->icp_children[icpa.icpa_unit] =
368			    config_found(icp->icp_dv, &icpa, icp_print,
369					 CFARGS(.submatch = config_stdsubmatch,
370						.locators = locs));
371		}
372	}
373
374	/*
375	 * Attach cache devices.
376	 */
377	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
378		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
379			if (icp->icp_cdr[j].cd_size == 0)
380				continue;
381
382			icpa.icpa_unit = j;
383
384			locs[ICPCF_UNIT] = j;
385
386			icp->icp_children[icpa.icpa_unit] =
387			    config_found(icp->icp_dv, &icpa, icp_print,
388					 CFARGS(.submatch = config_stdsubmatch,
389						.locators = locs));
390		}
391	}
392
393	/*
394	 * Start the watchdog.
395	 */
396	icp_watchdog(icp);
397
398	/*
399	 * Count the controller, and we're done!
400	 */
401	if (icp_count++ == 0)
402		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);
403
404	return (0);
405
406 bail_out:
407	if (state > 4)
408		for (j = 0; j < i; j++)
409			bus_dmamap_destroy(icp->icp_dmat,
410			    icp->icp_ccbs[j].ic_xfer_map);
411 	if (state > 3)
412		free(icp->icp_ccbs, M_DEVBUF);
413	if (state > 2)
414		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
415	if (state > 1)
416		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
417		    ICP_SCRATCH_SIZE);
418	if (state > 0)
419		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
420	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
421
422	return (1);
423}
424
/*
 * Record the service callback table for child unit `unit'.  Used by
 * icp_recompute_openings() to notify the child (via iscb_openings) of
 * its per-device command openings.
 */
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}
432
/*
 * Re-probe cache-service host drive `unit'.  If the drive is gone,
 * detach any attached child; if its size or type has changed (or no
 * child is attached yet), (re-)attach a child device.  Finally,
 * redistribute command openings among the remaining devices.
 */
void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and not
	 * commands running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	/* Query the drive's current size; failure means it is gone. */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] =
		    config_found(icp->icp_dv, &icpa, icp_print,
				 CFARGS(.submatch = config_stdsubmatch,
					.locators = locs));
	}

	icp_recompute_openings(icp);
}
521
/*
 * Old-style full rescan: re-initialize the cache service to learn the
 * current host-drive count, rescan every drive within that count, and
 * detach children left over in slots beyond it.  Same caller
 * requirements as icp_rescan() (frozen queue, no running commands,
 * ioctl mutex held).
 */
void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(icp->icp_dv));
		return;
	}
	/* The INIT reply's info word carries the host drive count. */
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
			    device_xname(icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}
562
563void
564icp_recompute_openings(struct icp_softc *icp)
565{
566	int unit, openings;
567
568	if (icp->icp_ndevs != 0)
569		openings =
570		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
571	else
572		openings = 0;
573	if (openings == icp->icp_openings)
574		return;
575	icp->icp_openings = openings;
576
577#ifdef ICP_DEBUG
578	printf("%s: %d device%s, %d openings per device\n",
579	    device_xname(icp->icp_dv), icp->icp_ndevs,
580	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
581#endif
582
583	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
584		if (icp->icp_children[unit] != NULL)
585			(*icp->icp_servicecb[unit]->iscb_openings)(
586			    icp->icp_children[unit], icp->icp_openings);
587	}
588}
589
590void
591icp_watchdog(void *cookie)
592{
593	struct icp_softc *icp;
594	int s;
595
596	icp = cookie;
597
598	s = splbio();
599	icp_intr(icp);
600	if (ICP_HAS_WORK(icp))
601		icp_ccb_enqueue(icp, NULL);
602	splx(s);
603
604	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
605	    icp_watchdog, icp);
606}
607
608int
609icp_print(void *aux, const char *pnp)
610{
611	struct icp_attach_args *icpa;
612	const char *str;
613
614	icpa = (struct icp_attach_args *)aux;
615
616	if (pnp != NULL) {
617		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
618			str = "block device";
619		else
620			str = "SCSI channel";
621		aprint_normal("%s at %s", str, pnp);
622	}
623	aprint_normal(" unit %d", icpa->icpa_unit);
624
625	return (UNCONF);
626}
627
/*
 * Handle an asynchronous (unsolicited) event from the controller for
 * the given service.  Screen-service message requests are currently
 * unhandled (XXX below); everything else is recorded in the event
 * buffer via icp_store_event().  Always returns 0.
 */
int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		/* Newer firmware (revision >= 0x1a) formats the event itself. */
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}
664
665int
666icp_intr(void *cookie)
667{
668	struct icp_softc *icp;
669	struct icp_intr_ctx ctx;
670	struct icp_ccb *ic;
671
672	icp = cookie;
673
674	ctx.istatus = (*icp->icp_get_status)(icp);
675	if (!ctx.istatus) {
676		icp->icp_status = ICP_S_NO_STATUS;
677		return (0);
678	}
679
680	(*icp->icp_intr)(icp, &ctx);
681
682	icp->icp_status = ctx.cmd_status;
683	icp->icp_service = ctx.service;
684	icp->icp_info = ctx.info;
685	icp->icp_info2 = ctx.info2;
686
687	switch (ctx.istatus) {
688	case ICP_ASYNCINDEX:
689		icp_async_event(icp, ctx.service);
690		return (1);
691
692	case ICP_SPEZINDEX:
693		aprint_error_dev(icp->icp_dv, "uninitialized or unknown service (%d/%d)\n",
694		    ctx.info, ctx.info2);
695		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
696		icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
697		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
698		return (1);
699	}
700
701	if ((ctx.istatus - 2) > icp->icp_nccbs)
702		panic("icp_intr: bad command index returned");
703
704	ic = &icp->icp_ccbs[ctx.istatus - 2];
705	ic->ic_status = icp->icp_status;
706
707	if ((ic->ic_flags & IC_ALLOCED) == 0) {
708		/* XXX ICP's "iir" driver just sends an event here. */
709		panic("icp_intr: inactive CCB identified");
710	}
711
712	/*
713	 * Try to protect ourselves from the running command count already
714	 * being 0 (e.g. if a polled command times out).
715	 */
716	KDASSERT(icp->icp_running != 0);
717	if (--icp->icp_running == 0 &&
718	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
719		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
720		wakeup(&icp->icp_qfreeze);
721	}
722
723	switch (icp->icp_status) {
724	case ICP_S_BSY:
725#ifdef ICP_DEBUG
726		printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
727#endif
728		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
729			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
730		else
731			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
732		break;
733
734	default:
735		ic->ic_flags |= IC_COMPLETE;
736
737		if ((ic->ic_flags & IC_WAITING) != 0)
738			wakeup(ic);
739		else if (ic->ic_intr != NULL)
740			(*ic->ic_intr)(ic);
741
742		if (ICP_HAS_WORK(icp))
743			icp_ccb_enqueue(icp, NULL);
744
745		break;
746	}
747
748	return (1);
749}
750
/*
 * Per-command context for user (ioctl) commands: links the CCB back to
 * the originating gdt_ucmd_t and records how many bytes were staged in
 * the ICP_SCRATCH_UCMD area.
 */
struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;	/* originating user command */
	u_int32_t iu_cnt;	/* scratch-area byte count (0 = no data) */
};
755
/*
 * Completion handler for user (ioctl) commands.  Copies the final
 * status/info back into the gdt_ucmd_t, syncs and copies any data out
 * of the shared scratch area, releases the scratch area (icp_ucmd_ccb),
 * and wakes whoever is sleeping on the CCB.
 */
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = device_private(ic->ic_dv);
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	/* Free the shared scratch area for the next queued user command. */
	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}
780
/*
 * Issue a driver-internal service command and poll for its completion,
 * retrying up to ICP_RETRIES times on poll failure.  For cache-service
 * ICP_IOCTL commands the controller transfers through the scratch area
 * (arg3 = buffer size).  Returns non-zero iff the final controller
 * status is ICP_S_OK.
 *
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		/* May sleep waiting for a free CCB. */
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				/* Hand the scratch area to the device. */
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				/* Reclaim the scratch area from the device. */
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}
851
/*
 * Execute a user (ioctl-originated) command.  The command's data is
 * staged through the shared ICP_SCRATCH_UCMD area (copied in by
 * icp_ccb_enqueue(), copied back out by icp_ucmd_intr()), so its size
 * is validated against GDT_SCRATCH_SZ up front.  Sleeps until the
 * command completes; returns 0 or an errno.
 */
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	/*
	 * Work out the transfer size for each service/opcode combination
	 * and reject anything that won't fit in the scratch area.
	 */
	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	/*
	 * Build the on-wire (little-endian) command from the user's copy,
	 * pointing all data addresses at the shared scratch area.
	 */
	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
				    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		/* Sense data lands in the scratch area after the SCSI data. */
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(icp->icp_dv, "error %d waiting for ucmd to complete\n",
		    error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}
964
965struct icp_ccb *
966icp_ccb_alloc(struct icp_softc *icp)
967{
968	struct icp_ccb *ic;
969	int s;
970
971	s = splbio();
972	if (__predict_false((ic =
973			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
974		splx(s);
975		return (NULL);
976	}
977	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
978	splx(s);
979
980	ic->ic_flags = IC_ALLOCED;
981	return (ic);
982}
983
/*
 * Blocking CCB allocation: sleep on the free list until a CCB becomes
 * available.  icp_ccb_free() issues the matching wakeup when
 * ICP_F_WAIT_CCB is set.
 */
struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		/* Ask icp_ccb_free() to wake us when it returns a CCB. */
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}
1001
1002void
1003icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
1004{
1005	int s;
1006
1007	s = splbio();
1008	ic->ic_flags = 0;
1009	ic->ic_intr = NULL;
1010	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
1011	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
1012		icp->icp_flags &= ~ICP_F_WAIT_CCB;
1013		wakeup(&icp->icp_ccb_freelist);
1014	}
1015	splx(s);
1016}
1017
/*
 * Queue `ic' for submission (or, when ic == NULL, simply drain the
 * queues) and submit as many pending commands as the controller will
 * accept.  User commands are checked first but are serialized among
 * themselves, since they all share the ICP_SCRATCH_UCMD area.
 */
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	/* Submit nothing while the queue is frozen. */
	for (; icp->icp_qfreeze == 0;) {
		if (__predict_false((ic =
			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			/* Stage the user data into the scratch area. */
			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		/* Hand the command to the hardware, then dequeue it. */
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}
1075
/*
 * Map a data transfer of `size' bytes at `data' for CCB `ic' and fill
 * in its scatter/gather list, if the command provides one (ic_sg may
 * legitimately be NULL only for single-segment transfers).  `dir' is
 * IC_XFER_IN and/or IC_XFER_OUT.  Returns 0 or the bus_dmamap_load()
 * error.
 */
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	/* Record the mapping; ic_xfer_size is needed later for unmap. */
	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		/* S/G entries are little-endian on the wire. */
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}
1114
1115void
1116icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
1117{
1118	int i;
1119
1120	if ((ic->ic_flags & IC_XFER_OUT) != 0)
1121		i = BUS_DMASYNC_POSTWRITE;
1122	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
1123		i = BUS_DMASYNC_POSTREAD;
1124
1125	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
1126	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
1127}
1128
1129int
1130icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1131{
1132	int s, rv;
1133
1134	s = splbio();
1135
1136	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
1137		if (!(*icp->icp_test_busy)(icp))
1138			break;
1139		DELAY(10);
1140	}
1141	if (timo == 0) {
1142		printf("%s: submit: busy\n", device_xname(icp->icp_dv));
1143		return (EAGAIN);
1144	}
1145
1146	icp_ccb_submit(icp, ic);
1147
1148	if (cold) {
1149		for (timo *= 10; timo != 0; timo--) {
1150			DELAY(100);
1151			icp_intr(icp);
1152			if ((ic->ic_flags & IC_COMPLETE) != 0)
1153				break;
1154		}
1155	} else {
1156		ic->ic_flags |= IC_WAITING;
1157		while ((ic->ic_flags & IC_COMPLETE) == 0) {
1158			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
1159					 mstohz(timo))) != 0) {
1160				timo = 0;
1161				break;
1162			}
1163		}
1164	}
1165
1166	if (timo != 0) {
1167		if (ic->ic_status != ICP_S_OK) {
1168#ifdef ICP_DEBUG
1169			printf("%s: request failed; status=0x%04x\n",
1170			    device_xname(icp->icp_dv), ic->ic_status);
1171#endif
1172			rv = EIO;
1173		} else
1174			rv = 0;
1175	} else {
1176		aprint_error_dev(icp->icp_dv, "command timed out\n");
1177		rv = EIO;
1178	}
1179
1180	while ((*icp->icp_test_busy)(icp) != 0)
1181		DELAY(10);
1182
1183	splx(s);
1184
1185	return (rv);
1186}
1187
1188int
1189icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1190{
1191	int s, rv;
1192
1193	ic->ic_flags |= IC_WAITING;
1194
1195	s = splbio();
1196	icp_ccb_enqueue(icp, ic);
1197	while ((ic->ic_flags & IC_COMPLETE) == 0) {
1198		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
1199			splx(s);
1200			return (rv);
1201		}
1202	}
1203	splx(s);
1204
1205	if (ic->ic_status != ICP_S_OK) {
1206		aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
1207		    ic->ic_status);
1208		return (EIO);
1209	}
1210
1211	return (0);
1212}
1213
1214int
1215icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
1216{
1217	int s, rv;
1218
1219	ic->ic_dv = icp->icp_dv;
1220	ic->ic_intr = icp_ucmd_intr;
1221	ic->ic_flags |= IC_UCMD;
1222
1223	s = splbio();
1224	icp_ccb_enqueue(icp, ic);
1225	while ((ic->ic_flags & IC_COMPLETE) == 0) {
1226		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
1227			splx(s);
1228			return (rv);
1229		}
1230	}
1231	splx(s);
1232
1233	return (0);
1234}
1235
/*
 * Hand a CCB to the controller.  Caller must hold splbio() and have
 * verified that the controller is not busy.  The statement order here
 * (semaphore, delay, copy, release) is the controller's submission
 * handshake and must not be changed.
 */
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	/* Controllers require the command length rounded up to 4 bytes. */
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	/* Tracked so icp_freeze() can wait for in-flight commands. */
	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}
1253
/*
 * Freeze the command queue (no new commands are submitted) and wait for
 * all in-flight commands to drain.  Freezes nest: each successful call
 * must be paired with icp_unfreeze().  Returns 0 on success or the
 * tsleep() error if the (interruptible) wait was broken; in that case
 * the freeze is undone before returning.
 */
int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	/* Only the first freezer has to wait for the queue to drain. */
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			/*
			 * Interrupted: back out our freeze and, if that
			 * unfroze the queue and work is pending, restart
			 * the queue before bailing out.
			 */
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}
1276
1277void
1278icp_unfreeze(struct icp_softc *icp)
1279{
1280	int s;
1281
1282	s = splbio();
1283	KDASSERT(icp->icp_qfreeze != 0);
1284	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
1285		icp_ccb_enqueue(icp, NULL);
1286	splx(s);
1287}
1288
/*
 * Controller event log: a fixed-size ring buffer of event records.
 * icp_event_oldidx is the oldest valid entry, icp_event_lastidx the
 * most recently written one.  Accessed at splbio().
 *
 * XXX Global - should be per-controller? XXX
 */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;	/* index of oldest stored event */
static int icp_event_lastidx;	/* index of newest stored event */
1293
/*
 * Record a controller event in the event ring buffer.  If the event is
 * identical to the most recently stored one (same source, index and
 * payload), only its timestamp and repeat count are updated; otherwise
 * a new entry is written, advancing the ring and dropping the oldest
 * entry when full.  Returns the stored entry, or NULL when source is 0.
 * Caller is expected to provide any required serialization.
 */
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	/*
	 * Duplicate check against the newest entry: binary payloads are
	 * compared with memcmp() when both have a size; string events
	 * (size == 0) are compared with strcmp().
	 */
	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      	     (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		/* Advance lastidx unless the current slot is still unused
		 * (event_source == 0 means an empty slot). */
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			/* Ring full: drop the oldest entry. */
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}
1334
1335int
1336icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
1337{
1338	gdt_evt_str *e;
1339	int eindex, s;
1340
1341	s = splbio();
1342
1343	if (handle == -1)
1344		eindex = icp_event_oldidx;
1345	else
1346		eindex = handle;
1347
1348	estr->event_source = 0;
1349
1350	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
1351		splx(s);
1352		return (eindex);
1353	}
1354
1355	e = &icp_event_buffer[eindex];
1356	if (e->event_source != 0) {
1357		if (eindex != icp_event_lastidx) {
1358			eindex++;
1359			if (eindex == ICP_MAX_EVENTS)
1360				eindex = 0;
1361		} else
1362			eindex = -1;
1363		memcpy(estr, e, sizeof(gdt_evt_str));
1364	}
1365
1366	splx(s);
1367
1368	return (eindex);
1369}
1370
1371void
1372icp_readapp_event(struct icp_softc *icp, u_int8_t application,
1373    gdt_evt_str *estr)
1374{
1375	gdt_evt_str *e;
1376	int found = 0, eindex, s;
1377
1378	s = splbio();
1379
1380	eindex = icp_event_oldidx;
1381	for (;;) {
1382		e = &icp_event_buffer[eindex];
1383		if (e->event_source == 0)
1384			break;
1385		if ((e->application & application) == 0) {
1386			e->application |= application;
1387			found = 1;
1388			break;
1389		}
1390		if (eindex == icp_event_lastidx)
1391			break;
1392		eindex++;
1393		if (eindex == ICP_MAX_EVENTS)
1394			eindex = 0;
1395	}
1396	if (found)
1397		memcpy(estr, e, sizeof(gdt_evt_str));
1398	else
1399		estr->event_source = 0;
1400
1401	splx(s);
1402}
1403
1404void
1405icp_clear_events(struct icp_softc *icp)
1406{
1407	int s;
1408
1409	s = splbio();
1410	icp_event_oldidx = icp_event_lastidx = 0;
1411	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
1412	splx(s);
1413}
1414