/*	$NetBSD$	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

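/*
 * Busy-wait up to `ms' milliseconds for `cond' to become true, polling
 * roughly every 100us.
 */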
#define POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

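/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array; the upper bits hold a generation number.
 */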
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(device_t);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(device_t, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

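/*
 * Register access helpers.  The bus_space barriers enforce ordering of
 * accesses to the IOP's shared-memory registers.
 */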
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((device_t)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&sc->sc_dv,
				    "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

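		/*
		 * struct i2o_systab embeds the first entry; allow for
		 * the remaining niop - 1.
		 */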
		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL) {
			aprint_error_dev(&sc->sc_dv,
			    "memory allocation failure\n");
			mutex_exit(&sc->sc_conflock);
			return;
		}

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

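			/* IDs 0 and 1 are reserved; IOP numbers start at 2. */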
			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
		config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
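	/* Request notification of the next LCT change. */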
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(&sc->sc_dv),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv,
			    "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
					 iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
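	/*
	 * Flags: initialisation code in the low bits, outbound frame size
	 * (in 32-bit words) in the upper 16 bits.
	 */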
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
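	/* Account for the two SG list words in the message size field. */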
	mb[0] += 2 << 16;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

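	/*
	 * The comma expression re-syncs the scratch area before each test
	 * of the status word.
	 */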
	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv,
		    "outbound FIFO init failed (%d)\n", le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

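	/* The HRT header embeds the first entry. */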
	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller is prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
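	/* tablesize is in 32-bit words; the header embeds one entry. */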
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "FIELD_SET failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "TABLE_CLEAR failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

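	/* mema[] and ioa[] each describe a window as { base, size }. */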
	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(&sc->sc_dv, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad ictx returned (%x)\n", ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
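		/*
		 * The low tctx bits index the wrapper array; the generation
		 * bits must match the wrapper's im_tctx exactly.
		 */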
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad tctx returned (0x%08x, %p)\n",
			    tctx, im);
			if (im != NULL)
				aprint_error_dev(&sc->sc_dv,
				    "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(&sc->sc_dv));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
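			/*
			 * The message size is in bits 16..31, in 32-bit
			 * words; >>14 yields bytes once the low two bits
			 * are masked off.
			 */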
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

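	/*
	 * Keep the wrapper index in the low bits; the upper bits form a
	 * generation number used to detect stale replies.
	 */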
1830	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1831	tctxgen += (1 << IOP_TCTX_SHIFT);
1832	im->im_flags = flags | IM_ALLOCED;
1833	im->im_rb = NULL;
1834	i = 0;
1835	do {
1836		im->im_xfer[i++].ix_size = 0;
1837	} while (i < IOP_MAX_MSG_XFERS);
1838
1839	return (im);
1840}
1841
1842/*
1843 * Free a message wrapper.
1844 */
1845void
1846iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1847{
1848
1849#ifdef I2ODEBUG
1850	if ((im->im_flags & IM_ALLOCED) == 0)
1851		panic("iop_msg_free: wrapper not allocated");
1852#endif
1853
1854	im->im_flags = 0;
1855	mutex_spin_enter(&sc->sc_intrlock);
1856	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1857	mutex_spin_exit(&sc->sc_intrlock);
1858}
1859
1860/*
1861 * Map a data transfer.  Write a scatter-gather list into the message frame.
1862 */
1863int
1864iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1865	    void *xferaddr, int xfersize, int out, struct proc *up)
1866{
1867	bus_dmamap_t dm;
1868	bus_dma_segment_t *ds;
1869	struct iop_xfer *ix;
1870	u_int rv, i, nsegs, flg, off, xn;
1871	u_int32_t *p;
1872
1873	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1874		if (ix->ix_size == 0)
1875			break;
1876
1877#ifdef I2ODEBUG
1878	if (xfersize == 0)
1879		panic("iop_msg_map: null transfer");
1880	if (xfersize > IOP_MAX_XFER)
1881		panic("iop_msg_map: transfer too large");
1882	if (xn == IOP_MAX_MSG_XFERS)
1883		panic("iop_msg_map: too many xfers");
1884#endif
1885
1886	/*
1887	 * Only the first DMA map is static.
1888	 */
1889	if (xn != 0) {
1890		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1891		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1892		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1893		if (rv != 0)
1894			return (rv);
1895	}
1896
1897	dm = ix->ix_map;
1898	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1899	    (up == NULL ? BUS_DMA_NOWAIT : 0));
1900	if (rv != 0)
1901		goto bad;
1902
1903	/*
1904	 * How many SIMPLE SG elements can we fit in this message?
1905	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
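		/*
		 * Fold the current frame size (bits 16..31 of mb[0], in
		 * words) into the SGL offset nibble (bits 4..7).
		 */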
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer.  This differs from iop_msg_map() in that
 * there is at most one transfer per message, and PAGE_LIST addressing may
 * be used.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

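			/*
			 * Split each segment at page boundaries; a
			 * PAGE_LIST entry addresses at most one page.
			 */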
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = htole32(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	mutex_spin_enter(&sc->sc_intrlock);

	/* Allocate a slot with the IOP. */
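	/* Double read to account for IOP bug. */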
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			mutex_spin_exit(&sc->sc_intrlock);
			aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_rep_size != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
	}

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
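	/* Frame size in bytes: (mb[0] >> 16) words scaled by four. */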
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	mutex_spin_exit(&sc->sc_intrlock);
	return (0);
}

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
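		/*
		 * The last element's flag/count word sits two words from
		 * the end of the frame, ahead of its address word.
		 */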
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		mutex_spin_enter(&sc->sc_intrlock);
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		mutex_spin_exit(&sc->sc_intrlock);
	} else
		rv = 0;

	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;

	mutex_spin_enter(&sc->sc_intrlock);

	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		mutex_spin_exit(&sc->sc_intrlock);
		DELAY(100);
		mutex_spin_enter(&sc->sc_intrlock);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv;

	mutex_spin_enter(&sc->sc_intrlock);
	if ((im->im_flags & IM_REPLIED) != 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return;
	}
	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
	mutex_spin_exit(&sc->sc_intrlock);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
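	/*
	 * Header word: message size (four words) in bits 16..31, I2O
	 * version in the low bits.
	 */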
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
	const char *statusstr;

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", device_xname(&sc->sc_dv));

	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
	    device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));

	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
	    device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
	    device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, stop at the
	 * first NUL unless the IOP is from DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		struct	i2o_param_device_identity di;
	} __packed p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
	       int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
	       int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}

int
iopopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	sc = device_lookup_private(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	sc = device_lookup_private(&iop_cd, minor(dev));
	rv = 0;

	switch (cmd) {
	case IOPIOCPT:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
#endif
		return (ENOTTY);
	}

	mutex_enter(&sc->sc_conflock);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	mutex_exit(&sc->sc_conflock);
	return (rv);
}

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 ||
#if 0
	    pt->pt_replylen < 0 ||
#endif
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
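	/* The copied-in message buffer doubles as the reply buffer. */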
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}
