mly.c revision 67446
1144966Svkashyap/*-
2169400Sscottl * Copyright (c) 2000 Michael Smith
3144966Svkashyap * Copyright (c) 2000 BSDi
4144966Svkashyap * All rights reserved.
5144966Svkashyap *
6144966Svkashyap * Redistribution and use in source and binary forms, with or without
7144966Svkashyap * modification, are permitted provided that the following conditions
8144966Svkashyap * are met:
9144966Svkashyap * 1. Redistributions of source code must retain the above copyright
10144966Svkashyap *    notice, this list of conditions and the following disclaimer.
11144966Svkashyap * 2. Redistributions in binary form must reproduce the above copyright
12144966Svkashyap *    notice, this list of conditions and the following disclaimer in the
13144966Svkashyap *    documentation and/or other materials provided with the distribution.
14144966Svkashyap *
15144966Svkashyap * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16144966Svkashyap * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17144966Svkashyap * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18144966Svkashyap * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19144966Svkashyap * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20144966Svkashyap * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21144966Svkashyap * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22144966Svkashyap * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23144966Svkashyap * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24144966Svkashyap * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25144966Svkashyap * SUCH DAMAGE.
26144966Svkashyap *
27144966Svkashyap *	$FreeBSD: head/sys/dev/mly/mly.c 67446 2000-10-22 19:39:17Z phk $
28144966Svkashyap */
29144966Svkashyap
30144966Svkashyap#include <sys/param.h>
31144966Svkashyap#include <sys/systm.h>
32144966Svkashyap#include <sys/malloc.h>
33144966Svkashyap#include <sys/kernel.h>
34169400Sscottl#include <sys/bus.h>
35172496Sscottl#include <sys/conf.h>
36144966Svkashyap#include <sys/ctype.h>
37144966Svkashyap
38144966Svkashyap#include <machine/bus_memio.h>
39144966Svkashyap#include <machine/bus.h>
40144966Svkashyap#include <machine/resource.h>
41144966Svkashyap#include <sys/rman.h>
42144966Svkashyap
43144966Svkashyap#include <cam/scsi/scsi_all.h>
44144966Svkashyap
45144966Svkashyap/* XXX: This is not where we should get fldoff() from. */
46144966Svkashyap#include <struct.h>
47144966Svkashyap
48144966Svkashyap#include <dev/mly/mlyreg.h>
49144966Svkashyap#include <dev/mly/mlyvar.h>
50144966Svkashyap#define MLY_DEFINE_TABLES
51144966Svkashyap#include <dev/mly/mly_tables.h>
52144966Svkashyap
53144966Svkashyapstatic int	mly_get_controllerinfo(struct mly_softc *sc);
54144966Svkashyapstatic void	mly_scan_devices(struct mly_softc *sc);
55152213Svkashyapstatic void	mly_rescan_btl(struct mly_softc *sc, int bus, int target);
56152213Svkashyapstatic void	mly_complete_rescan(struct mly_command *mc);
57169400Sscottlstatic int	mly_get_eventstatus(struct mly_softc *sc);
58172496Sscottlstatic int	mly_enable_mmbox(struct mly_softc *sc);
59144966Svkashyapstatic int	mly_flush(struct mly_softc *sc);
60152213Svkashyapstatic int	mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
61152213Svkashyap			  size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
62152213Svkashyapstatic void	mly_fetch_event(struct mly_softc *sc);
63152213Svkashyapstatic void	mly_complete_event(struct mly_command *mc);
64152213Svkashyapstatic void	mly_process_event(struct mly_softc *sc, struct mly_event *me);
65152213Svkashyapstatic void	mly_periodic(void *data);
66152213Svkashyap
67152213Svkashyapstatic int	mly_immediate_command(struct mly_command *mc);
68169400Sscottlstatic int	mly_start(struct mly_command *mc);
69152213Svkashyapstatic void	mly_complete(void *context, int pending);
70152213Svkashyap
71144966Svkashyapstatic int	mly_get_slot(struct mly_command *mc);
72144966Svkashyapstatic void	mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
73144966Svkashyapstatic void	mly_alloc_command_cluster(struct mly_softc *sc);
74144966Svkashyapstatic void	mly_map_command(struct mly_command *mc);
75144966Svkashyapstatic void	mly_unmap_command(struct mly_command *mc);
76144966Svkashyap
77144966Svkashyapstatic int	mly_fwhandshake(struct mly_softc *sc);
78144966Svkashyap
79208969Sdelphijstatic void	mly_describe_controller(struct mly_softc *sc);
80144966Svkashyap#ifdef MLY_DEBUG
81144966Svkashyapstatic void	mly_printstate(struct mly_softc *sc);
82144966Svkashyapstatic void	mly_print_command(struct mly_command *mc);
83144966Svkashyapstatic void	mly_print_packet(struct mly_command *mc);
84144966Svkashyapstatic void	mly_panic(struct mly_softc *sc, char *reason);
85144966Svkashyap#endif
86144966Svkashyap
87144966Svkashyap/********************************************************************************
88144966Svkashyap ********************************************************************************
89144966Svkashyap                                                                 Device Interface
90152213Svkashyap ********************************************************************************
91144966Svkashyap ********************************************************************************/
92144966Svkashyap
93144966Svkashyap/********************************************************************************
94144966Svkashyap * Initialise the controller and softc
95144966Svkashyap */
/*
 * Bring the controller to the point where it can service I/O and
 * event polling.  Returns 0 on success or an errno on failure.
 *
 * NOTE(review): the ordering here is significant — interrupts are masked
 * before the first word is exchanged with the controller, the firmware
 * handshake precedes any command issue, and interrupts are only unmasked
 * after the first periodic poll has been primed.
 */
int
mly_attach(struct mly_softc *sc)
{
    int		error;

    debug_called(1);

    /*
     * Initialise per-controller queues.
     */
    TAILQ_INIT(&sc->mly_freecmds);
    TAILQ_INIT(&sc->mly_ready);
    TAILQ_INIT(&sc->mly_completed);
    TAILQ_INIT(&sc->mly_clusters);

#if __FreeBSD_version >= 500005
    /*
     * Initialise command-completion task (SMPng deferred-completion path).
     */
    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
#endif

    /* disable interrupts before we start talking to the controller */
    MLY_MASK_INTERRUPTS(sc);

    /*
     * Wait for the controller to come ready, handshake with the firmware if required.
     * This is typically only necessary on platforms where the controller BIOS does not
     * run.
     */
    if ((error = mly_fwhandshake(sc)))
	return(error);

    /*
     * Initialise the slot allocator so that we can issue commands.
     * Start with the compile-time maximum; trimmed below once we know
     * what the controller actually supports.
     */
    sc->mly_max_commands = MLY_SLOT_MAX;
    sc->mly_last_slot = MLY_SLOT_START;

    /*
     * Obtain controller feature information
     */
    if ((error = mly_get_controllerinfo(sc)))
	return(error);

    /*
     * Update the slot allocator limit based on the controller inquiry.
     */
    sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX);

    /*
     * Get the current event counter for health purposes, populate the initial
     * health status buffer.
     */
    if ((error = mly_get_eventstatus(sc)))
	return(error);

    /*
     * Enable memory-mailbox mode
     */
    if ((error = mly_enable_mmbox(sc)))
	return(error);

    /*
     * Attach to CAM.
     */
    if ((error = mly_cam_attach(sc)))
	return(error);

    /*
     * Print a little information about the controller
     */
    mly_describe_controller(sc);

    /*
     * Mark all attached devices for rescan
     */
    mly_scan_devices(sc);

    /*
     * Instigate the first status poll immediately.  Rescan completions won't
     * happen until interrupts are enabled, which should still be before
     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
     * discovery here...)
     */
    mly_periodic((void *)sc);

    /* enable interrupts now */
    MLY_UNMASK_INTERRUPTS(sc);

    return(0);
}
188144966Svkashyap
189144966Svkashyap/********************************************************************************
190144966Svkashyap * Bring the controller to a state where it can be safely left alone.
191144966Svkashyap */
192209860Sdelphijvoid
193144966Svkashyapmly_detach(struct mly_softc *sc)
194144966Svkashyap{
195144966Svkashyap
196144966Svkashyap    debug_called(1);
197144966Svkashyap
198144966Svkashyap    /* kill the periodic event */
199144966Svkashyap    untimeout(mly_periodic, sc, sc->mly_periodic);
200144966Svkashyap
201144966Svkashyap    sc->mly_state |= MLY_STATE_SUSPEND;
202144966Svkashyap
203144966Svkashyap    /* flush controller */
204144966Svkashyap    mly_printf(sc, "flushing cache...");
205144966Svkashyap    printf("%s\n", mly_flush(sc) ? "failed" : "done");
206144966Svkashyap
207144966Svkashyap    MLY_MASK_INTERRUPTS(sc);
208144966Svkashyap}
209209860Sdelphij
210144966Svkashyap/********************************************************************************
211144966Svkashyap ********************************************************************************
212144966Svkashyap                                                                 Command Wrappers
213144966Svkashyap ********************************************************************************
214144966Svkashyap ********************************************************************************/
215144966Svkashyap
216144966Svkashyap/********************************************************************************
217144966Svkashyap * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
218144966Svkashyap */
219209860Sdelphijstatic int
220144966Svkashyapmly_get_controllerinfo(struct mly_softc *sc)
221144966Svkashyap{
222144966Svkashyap    struct mly_command_ioctl	mci;
223144966Svkashyap    u_int8_t			status;
224144966Svkashyap    int				error;
225144966Svkashyap
226144966Svkashyap    debug_called(1);
227144966Svkashyap
228144966Svkashyap    if (sc->mly_controllerinfo != NULL)
229144966Svkashyap	free(sc->mly_controllerinfo, M_DEVBUF);
230144966Svkashyap
231144966Svkashyap    /* build the getcontrollerinfo ioctl and send it */
232144966Svkashyap    bzero(&mci, sizeof(mci));
233144966Svkashyap    sc->mly_controllerinfo = NULL;
234144966Svkashyap    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
235144966Svkashyap    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
236144966Svkashyap			   &status, NULL, NULL)))
237144966Svkashyap	return(error);
238144966Svkashyap    if (status != 0)
239144966Svkashyap	return(EIO);
240144966Svkashyap
241144966Svkashyap    if (sc->mly_controllerparam != NULL)
242144966Svkashyap	free(sc->mly_controllerparam, M_DEVBUF);
243144966Svkashyap
244144966Svkashyap    /* build the getcontrollerparameter ioctl and send it */
245144966Svkashyap    bzero(&mci, sizeof(mci));
246144966Svkashyap    sc->mly_controllerparam = NULL;
247144966Svkashyap    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
248144966Svkashyap    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
249144966Svkashyap			   &status, NULL, NULL)))
250144966Svkashyap	return(error);
251144966Svkashyap    if (status != 0)
252144966Svkashyap	return(EIO);
253144966Svkashyap
254144966Svkashyap    return(0);
255144966Svkashyap}
256144966Svkashyap
257144966Svkashyap/********************************************************************************
258144966Svkashyap * Schedule all possible devices for a rescan.
259144966Svkashyap *
260144966Svkashyap */
261144966Svkashyapstatic void
262144966Svkashyapmly_scan_devices(struct mly_softc *sc)
263144966Svkashyap{
264144966Svkashyap    int		bus, target, nchn;
265144966Svkashyap
266144966Svkashyap    debug_called(1);
267144966Svkashyap
268144966Svkashyap    /*
269144966Svkashyap     * Clear any previous BTL information.
270144966Svkashyap     */
271144966Svkashyap    bzero(&sc->mly_btl, sizeof(sc->mly_btl));
272144966Svkashyap
273144966Svkashyap    /*
274144966Svkashyap     * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
275144966Svkashyap     */
276144966Svkashyap    nchn = sc->mly_controllerinfo->physical_channels_present +
277144966Svkashyap	sc->mly_controllerinfo->virtual_channels_present;
278144966Svkashyap    for (bus = 0; bus < nchn; bus++)
279144966Svkashyap	for (target = 0; target < MLY_MAX_TARGETS; target++)
280144966Svkashyap	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
281144966Svkashyap
282144966Svkashyap}
283144966Svkashyap
284144966Svkashyap/********************************************************************************
285144966Svkashyap * Rescan a device, possibly as a consequence of getting an event which suggests
286144966Svkashyap * that it may have changed.
287144966Svkashyap */
/*
 * Issue a device-information query for (bus, target).  On allocation
 * failure we simply return; the BTL still has MLY_BTL_RESCAN set, so the
 * periodic scan will retry.  Completion is handled by mly_complete_rescan,
 * which also frees the data buffer allocated here.
 */
static void
mly_rescan_btl(struct mly_softc *sc, int bus, int target)
{
    struct mly_command		*mc;
    struct mly_command_ioctl	*mci;

    debug_called(2);

    /* get a command */
    mc = NULL;
    if (mly_alloc_command(sc, &mc))
	return;				/* we'll be retried soon */

    /* set up the data buffer (freed by mly_complete_rescan) */
    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT)) == NULL) {
	mly_release_command(mc);
	return;				/* we'll get retried the next time a command completes */
    }
    bzero(mc->mc_data, sizeof(union mly_devinfo));
    mc->mc_flags |= MLY_CMD_DATAIN;
    mc->mc_complete = mly_complete_rescan;

    /* clear the rescan flag now so the periodic scan won't re-issue this query */
    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;

    /*
     * Build the ioctl.
     *
     * At this point we are committed to sending this request, as it
     * will be the only one constructed for this particular update.
     */
    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
    mci->opcode = MDACMD_IOCTL;
    mci->addr.phys.controller = 0;
    mci->timeout.value = 30;
    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
    /* buses past the physical channels are virtual, i.e. logical devices */
    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
	/* logical device number is the linear index on the virtual channels */
	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
	    + target;
	debug(2, "logical device %d", mci->addr.log.logdev);
    } else {
	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
	mci->addr.phys.lun = 0;
	mci->addr.phys.target = target;
	mci->addr.phys.channel = bus;
	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
    }

    /*
     * Use the ready queue to get this command dispatched.
     */
    mly_enqueue_ready(mc);
    mly_startio(sc);
}
344144966Svkashyap
345144966Svkashyap/********************************************************************************
346144966Svkashyap * Handle the completion of a rescan operation
347144966Svkashyap */
/*
 * Completion handler for mly_rescan_btl: decode the returned device
 * information and update the corresponding BTL table entry.  The data
 * buffer allocated by mly_rescan_btl is always freed here.
 */
static void
mly_complete_rescan(struct mly_command *mc)
{
    struct mly_softc				*sc = mc->mc_sc;
    struct mly_ioctl_getlogdevinfovalid		*ldi;
    struct mly_ioctl_getphysdevinfovalid	*pdi;
    int						bus, target;

    debug_called(2);

    /* if (and only if) the command completed OK, use the result to update our data */
    if (mc->mc_status == 0) {
	/* the transfer length tells us which of the two query types this was */
	if (mc->mc_length == sizeof(*ldi)) {
	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
	    /* map the linear logical-device number back to (bus, target) */
	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
	    sc->mly_btl[bus][target].mb_state = ldi->state;
	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
		  mly_describe_code(mly_table_device_type, ldi->raid_level),
		  mly_describe_code(mly_table_device_state, ldi->state));
	} else if (mc->mc_length == sizeof(*pdi)) {
	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
	    bus = pdi->channel;
	    target = pdi->target;
	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
	    sc->mly_btl[bus][target].mb_state = pdi->state;
	    /* configured physical devices belong to an array; protect them from direct access */
	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
	    debug(2, "BTL rescan for %d:%d returns %s", bus, target,
		  mly_describe_code(mly_table_device_state, pdi->state));
	} else {
	    mly_printf(sc, "BTL rescan result corrupted\n");
	}
    } else {
	/*
	 * A request sent for a device beyond the last device present will fail.
	 * We don't care about this, so we do nothing about it.
	 */
    }
    /* free the devinfo buffer allocated by mly_rescan_btl */
    free(mc->mc_data, M_DEVBUF);
    mly_release_command(mc);
}
393144966Svkashyap
394144966Svkashyap/********************************************************************************
395144966Svkashyap * Get the current health status and set the 'next event' counter to suit.
396144966Svkashyap */
397144966Svkashyapstatic int
398144966Svkashyapmly_get_eventstatus(struct mly_softc *sc)
399144966Svkashyap{
400144966Svkashyap    struct mly_command_ioctl	mci;
401144966Svkashyap    struct mly_health_status	*mh;
402144966Svkashyap    u_int8_t			status;
403144966Svkashyap    int				error;
404144966Svkashyap
405144966Svkashyap    /* build the gethealthstatus ioctl and send it */
406144966Svkashyap    bzero(&mci, sizeof(mci));
407144966Svkashyap    mh = NULL;
408144966Svkashyap    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
409144966Svkashyap
410144966Svkashyap    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
411144966Svkashyap	return(error);
412144966Svkashyap    if (status != 0)
413144966Svkashyap	return(EIO);
414144966Svkashyap
415144966Svkashyap    /* get the event counter */
416144966Svkashyap    sc->mly_event_change = mh->change_counter;
417144966Svkashyap    sc->mly_event_waiting = mh->next_event;
418144966Svkashyap    sc->mly_event_counter = mh->next_event;
419144966Svkashyap
420144966Svkashyap    /* save the health status into the memory mailbox */
421144966Svkashyap    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
422144966Svkashyap
423144966Svkashyap    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
424144966Svkashyap
425144966Svkashyap    free(mh, M_DEVBUF);
426144966Svkashyap    return(0);
427144966Svkashyap}
428144966Svkashyap
429144966Svkashyap/********************************************************************************
430144966Svkashyap * Enable the memory mailbox mode.
431144966Svkashyap */
/*
 * Switch the controller into memory-mailbox mode: tell it the bus addresses
 * of our command, status and health regions inside the shared mmbox
 * allocation.  Returns 0 or an errno; on success MLY_STATE_MMBOX_ACTIVE
 * is set in the softc state.
 */
static int
mly_enable_mmbox(struct mly_softc *sc)
{
    struct mly_command_ioctl	mci;
    u_int8_t			*sp, status;
    int				error;

    debug_called(1);

    /* build the SETMEMORYMAILBOX ioctl and send it */
    bzero(&mci, sizeof(mci));
    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
    /* set buffer addresses: bus address of the mmbox plus each region's offset */
    mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_command);
    mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_status);
    mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_health);

    /*
     * Set buffer sizes - abuse of data_size field is revolting.
     * The firmware expects the command/status region sizes (in KB) packed
     * into the two bytes of data_size, so we poke them in byte-wise.
     */
    sp = (u_int8_t *)&mci.data_size;
    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;

    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox,
	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
	  mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size);

    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
	return(error);
    if (status != 0)
	return(EIO);
    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
    debug(1, "memory mailbox active");
    return(0);
}
468144966Svkashyap
469144966Svkashyap/********************************************************************************
470144966Svkashyap * Flush all pending I/O from the controller.
471144966Svkashyap */
472144966Svkashyapstatic int
473144966Svkashyapmly_flush(struct mly_softc *sc)
474144966Svkashyap{
475144966Svkashyap    struct mly_command_ioctl	mci;
476144966Svkashyap    u_int8_t			status;
477144966Svkashyap    int				error;
478144966Svkashyap
479144966Svkashyap    debug_called(1);
480144966Svkashyap
481144966Svkashyap    /* build the ioctl */
482144966Svkashyap    bzero(&mci, sizeof(mci));
483144966Svkashyap    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
484144966Svkashyap    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
485152213Svkashyap
486152213Svkashyap    /* pass it off to the controller */
487152213Svkashyap    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
488152213Svkashyap	return(error);
489152213Svkashyap
490152213Svkashyap    return((status == 0) ? 0 : EIO);
491144966Svkashyap}
492144966Svkashyap
493144966Svkashyap/********************************************************************************
494144966Svkashyap * Perform an ioctl command.
495144966Svkashyap *
496144966Svkashyap * If (data) is not NULL, the command requires data transfer.  If (*data) is NULL
497144966Svkashyap * the command requires data transfer from the controller, and we will allocate
498144966Svkashyap * a buffer for it.  If (*data) is not NULL, the command requires data transfer
499144966Svkashyap * to the controller.
500144966Svkashyap *
501144966Svkashyap * XXX passing in the whole ioctl structure is ugly.  Better ideas?
502144966Svkashyap *
503144966Svkashyap * XXX we don't even try to handle the case where datasize > 4k.  We should.
504144966Svkashyap */
505144966Svkashyapstatic int
506144966Svkashyapmly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
507144966Svkashyap	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
508144966Svkashyap{
509144966Svkashyap    struct mly_command		*mc;
510144966Svkashyap    struct mly_command_ioctl	*mci;
511144966Svkashyap    int				error;
512144966Svkashyap
513144966Svkashyap    debug_called(1);
514144966Svkashyap
515144966Svkashyap    mc = NULL;
516144966Svkashyap    if (mly_alloc_command(sc, &mc)) {
517144966Svkashyap	error = ENOMEM;
518144966Svkashyap	goto out;
519144966Svkashyap    }
520144966Svkashyap
521144966Svkashyap    /* copy the ioctl structure, but save some important fields and then fixup */
522144966Svkashyap    mci = &mc->mc_packet->ioctl;
523144966Svkashyap    ioctl->sense_buffer_address = mci->sense_buffer_address;
524144966Svkashyap    ioctl->maximum_sense_size = mci->maximum_sense_size;
525144966Svkashyap    *mci = *ioctl;
526144966Svkashyap    mci->opcode = MDACMD_IOCTL;
527144966Svkashyap    mci->timeout.value = 30;
528144966Svkashyap    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
529144966Svkashyap
530144966Svkashyap    /* handle the data buffer */
531144966Svkashyap    if (data != NULL) {
532144966Svkashyap	if (*data == NULL) {
533144966Svkashyap	    /* allocate data buffer */
534144966Svkashyap	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
535144966Svkashyap		error = ENOMEM;
536144966Svkashyap		goto out;
537144966Svkashyap	    }
538144966Svkashyap	    mc->mc_flags |= MLY_CMD_DATAIN;
539144966Svkashyap	} else {
540152213Svkashyap	    mc->mc_data = *data;
541144966Svkashyap	    mc->mc_flags |= MLY_CMD_DATAOUT;
542144966Svkashyap	}
543144966Svkashyap	mc->mc_length = datasize;
544144966Svkashyap	mc->mc_packet->generic.data_size = datasize;
545144966Svkashyap    }
546152213Svkashyap
547152213Svkashyap    /* run the command */
548152213Svkashyap    if ((error = mly_immediate_command(mc)))
549152213Svkashyap	goto out;
550152213Svkashyap
551144966Svkashyap    /* clean up and return any data */
552144966Svkashyap    *status = mc->mc_status;
553152213Svkashyap    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
554152213Svkashyap	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
555152213Svkashyap	*sense_length = mc->mc_sense;
556144966Svkashyap	goto out;
557144966Svkashyap    }
558144966Svkashyap
559212008Sdelphij    /* should we return a data pointer? */
560212008Sdelphij    if ((data != NULL) && (*data == NULL))
561212008Sdelphij	*data = mc->mc_data;
562212008Sdelphij
563144966Svkashyap    /* command completed OK */
564144966Svkashyap    error = 0;
565144966Svkashyap
566144966Svkashyapout:
567144966Svkashyap    if (mc != NULL) {
568144966Svkashyap	/* do we need to free a data buffer we allocated? */
569197409Srdivacky	if (error && (mc->mc_data != NULL) && (*data == NULL))
570144966Svkashyap	    free(mc->mc_data, M_DEVBUF);
571144966Svkashyap	mly_release_command(mc);
572144966Svkashyap    }
573144966Svkashyap    return(error);
574144966Svkashyap}
575144966Svkashyap
576144966Svkashyap/********************************************************************************
577144966Svkashyap * Fetch one event from the controller.
578144966Svkashyap */
579144966Svkashyapstatic void
580144966Svkashyapmly_fetch_event(struct mly_softc *sc)
581144966Svkashyap{
582144966Svkashyap    struct mly_command		*mc;
583144966Svkashyap    struct mly_command_ioctl	*mci;
584144966Svkashyap    int				s;
585144966Svkashyap    u_int32_t			event;
586144966Svkashyap
587144966Svkashyap    debug_called(2);
588144966Svkashyap
589144966Svkashyap    /* get a command */
590144966Svkashyap    mc = NULL;
591144966Svkashyap    if (mly_alloc_command(sc, &mc))
592144966Svkashyap	return;				/* we'll get retried the next time a command completes */
593144966Svkashyap
594144966Svkashyap    /* set up the data buffer */
595144966Svkashyap    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT)) == NULL) {
596144966Svkashyap	mly_release_command(mc);
597144966Svkashyap	return;				/* we'll get retried the next time a command completes */
598144966Svkashyap    }
599144966Svkashyap    bzero(mc->mc_data, sizeof(struct mly_event));
600144966Svkashyap    mc->mc_length = sizeof(struct mly_event);
601152213Svkashyap    mc->mc_flags |= MLY_CMD_DATAIN;
602152213Svkashyap    mc->mc_complete = mly_complete_event;
603144966Svkashyap
604    /*
605     * Get an event number to fetch.  It's possible that we've raced with another
606     * context for the last event, in which case there will be no more events.
607     */
608    s = splcam();
609    if (sc->mly_event_counter == sc->mly_event_waiting) {
610	mly_release_command(mc);
611	splx(s);
612	return;
613    }
614    event = sc->mly_event_counter++;
615    splx(s);
616
617    /*
618     * Build the ioctl.
619     *
620     * At this point we are committed to sending this request, as it
621     * will be the only one constructed for this particular event number.
622     */
623    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
624    mci->opcode = MDACMD_IOCTL;
625    mci->data_size = sizeof(struct mly_event);
626    mci->addr.phys.lun = (event >> 16) & 0xff;
627    mci->addr.phys.target = (event >> 24) & 0xff;
628    mci->addr.phys.channel = 0;
629    mci->addr.phys.controller = 0;
630    mci->timeout.value = 30;
631    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
632    mci->sub_ioctl = MDACIOCTL_GETEVENT;
633    mci->param.getevent.sequence_number_low = event & 0xffff;
634
635    debug(2, "fetch event %u", event);
636
637    /*
638     * Use the ready queue to get this command dispatched.
639     */
640    mly_enqueue_ready(mc);
641    mly_startio(sc);
642}
643
644/********************************************************************************
645 * Handle the completion of an event poll.
646 *
647 * Note that we don't actually have to instigate another poll; the completion of
648 * this command will trigger that if there are any more events to poll for.
649 */
650static void
651mly_complete_event(struct mly_command *mc)
652{
653    struct mly_softc	*sc = mc->mc_sc;
654    struct mly_event	*me = (struct mly_event *)mc->mc_data;
655
656    debug_called(2);
657
658    /*
659     * If the event was successfully fetched, process it.
660     */
661    if (mc->mc_status == SCSI_STATUS_OK) {
662	mly_process_event(sc, me);
663	free(me, M_DEVBUF);
664    }
665    mly_release_command(mc);
666}
667
668/********************************************************************************
669 * Process a controller event.
670 */
671static void
672mly_process_event(struct mly_softc *sc, struct mly_event *me)
673{
674    struct scsi_sense_data	*ssd = (struct scsi_sense_data *)&me->sense[0];
675    char			*fp, *tp;
676    int				bus, target, event, class, action;
677
678    /*
679     * Errors can be reported using vendor-unique sense data.  In this case, the
680     * event code will be 0x1c (Request sense data present), the sense key will
681     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
682     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
683     * and low seven bits of the ASC (low seven bits of the high byte).
684     */
685    if ((me->code == 0x1c) &&
686	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
687	(ssd->add_sense_code & 0x80)) {
688	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
689    } else {
690	event = me->code;
691    }
692
693    /* look up event, get codes */
694    fp = mly_describe_code(mly_table_event, event);
695
696    debug(2, "Event %d  code 0x%x", me->sequence_number, me->code);
697
698    /* quiet event? */
699    class = fp[0];
700    if (isupper(class) && bootverbose)
701	class = tolower(class);
702
703    /* get action code, text string */
704    action = fp[1];
705    tp = &fp[2];
706
707    /*
708     * Print some information about the event.
709     *
710     * This code uses a table derived from the corresponding portion of the Linux
711     * driver, and thus the parser is very similar.
712     */
713    switch(class) {
714    case 'p':		/* error on physical device */
715	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
716	if (action == 'r')
717	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
718	break;
719    case 'l':		/* error on logical unit */
720    case 'm':		/* message about logical unit */
721	bus = MLY_LOGDEV_BUS(sc, me->lun);
722	target = MLY_LOGDEV_TARGET(me->lun);
723	mly_name_device(sc, bus, target);
724	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
725	if (action == 'r')
726	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
727	break;
728      break;
729    case 's':		/* report of sense data */
730	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
731	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
732	     (ssd->add_sense_code == 0x04) &&
733	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
734	    break;	/* ignore NO_SENSE or NOT_READY in one case */
735
736	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
737	mly_printf(sc, "  sense key %d  asc %02x  ascq %02x\n",
738		      ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
739	mly_printf(sc, "  info %4D  csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
740	if (action == 'r')
741	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
742	break;
743    case 'e':
744	mly_printf(sc, tp, me->target, me->lun);
745	break;
746    case 'c':
747	mly_printf(sc, "controller %s\n", tp);
748	break;
749    case '?':
750	mly_printf(sc, "%s - %d\n", tp, me->code);
751	break;
752    default:	/* probably a 'noisy' event being ignored */
753	break;
754    }
755}
756
757/********************************************************************************
758 * Perform periodic activities.
759 */
760static void
761mly_periodic(void *data)
762{
763    struct mly_softc	*sc = (struct mly_softc *)data;
764    int			nchn, bus, target;
765
766    debug_called(2);
767
768    /*
769     * Scan devices.
770     */
771    nchn = sc->mly_controllerinfo->physical_channels_present +
772	sc->mly_controllerinfo->virtual_channels_present;
773    for (bus = 0; bus < nchn; bus++) {
774	for (target = 0; target < MLY_MAX_TARGETS; target++) {
775
776	    /* ignore the controller in this scan */
777	    if (target == sc->mly_controllerparam->initiator_id)
778		continue;
779
780	    /* perform device rescan? */
781	    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
782		mly_rescan_btl(sc, bus, target);
783	}
784    }
785
786    sc->mly_periodic = timeout(mly_periodic, sc, hz);
787}
788
789/********************************************************************************
790 ********************************************************************************
791                                                               Command Processing
792 ********************************************************************************
793 ********************************************************************************/
794
795/********************************************************************************
796 * Run a command and wait for it to complete.
797 *
798 */
799static int
800mly_immediate_command(struct mly_command *mc)
801{
802    struct mly_softc	*sc = mc->mc_sc;
803    int			error, s;
804
805    debug_called(2);
806
807    /* spinning at splcam is ugly, but we're only used during controller init */
808    s = splcam();
809    if ((error = mly_start(mc)))
810	return(error);
811
812    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
813	/* sleep on the command */
814	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) {
815	    tsleep(mc, PRIBIO, "mlywait", 0);
816	}
817    } else {
818	/* spin and collect status while we do */
819	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE)
820	    mly_done(mc->mc_sc);
821    }
822    splx(s);
823    return(0);
824}
825
826/********************************************************************************
827 * Start as much queued I/O as possible on the controller
828 */
829void
830mly_startio(struct mly_softc *sc)
831{
832    struct mly_command	*mc;
833
834    debug_called(2);
835
836    for (;;) {
837
838	/* try for a ready command */
839	mc = mly_dequeue_ready(sc);
840
841	/* try to build a command from a queued ccb */
842	if (!mc)
843	    mly_cam_command(sc, &mc);
844
845	/* no command == nothing to do */
846	if (!mc)
847	    break;
848
849	/* try to post the command */
850	if (mly_start(mc)) {
851	    /* controller busy, or no resources - defer for later */
852	    mly_requeue_ready(mc);
853	    break;
854	}
855    }
856}
857
858/********************************************************************************
859 * Deliver a command to the controller; allocate controller resources at the
860 * last moment.
861 */
static int
mly_start(struct mly_command *mc)
{
    struct mly_softc		*sc = mc->mc_sc;
    union mly_command_packet	*pkt;
    int				s;

    debug_called(2);

    /*
     * Set the command up for delivery to the controller.  This may fail
     * due to resource shortages.
     */
    if (mly_get_slot(mc))
	return(EBUSY);		/* no free slot; caller requeues (see mly_startio) */
    mly_map_command(mc);

    s = splcam();
    /*
     * Do we have to use the hardware mailbox?
     */
    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
	/*
	 * Check to see if the controller is ready for us.
	 */
	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
	    splx(s);
	    return(EBUSY);	/* previous command not yet consumed */
	}

	/*
	 * It's ready, send the command.
	 */
	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);

    } else {	/* use memory-mailbox mode */

	/* next command slot in the circular memory mailbox */
	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];

	/* check to see if the next slot is free yet */
	if (pkt->mmbox.flag != 0) {
	    splx(s);
	    return(EBUSY);
	}

	/*
	 * The flag byte is what the controller polls, so it must be
	 * written only after the payload is fully visible: copy data,
	 * barrier, write flag, barrier, then ring the doorbell.
	 * NOTE(review): NULL tag/handle passed to bus_space_barrier --
	 * relies on the implementation ignoring them; confirm.
	 */
	/* copy in new command */
	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
	/* barrier to ensure completion of previous write before we write the flag */
	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
	/* copy flag last */
	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
	/* barrier to ensure completion of previous write before we notify the controller */
	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */

	/* signal controller, update index */
	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
    }

    splx(s);
    return(0);
}
925
926/********************************************************************************
927 * Pick up command status from the controller, schedule a completion event
928 */
void
mly_done(struct mly_softc *sc)
{
    struct mly_command		*mc;
    union mly_status_packet	*sp;
    u_int16_t			slot;
    int				s, worked;

    s = splcam();
    worked = 0;		/* set if we moved at least one command to the completed queue */

    /* pick up hardware-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
	/* the HM status mailbox holds: slot(2), status(1), sense(1), residue(4) */
	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
	if (slot < MLY_SLOT_MAX) {
	    mc = sc->mly_busycmds[slot];
	    if (mc != NULL) {
		mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
		mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
		mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
		mly_enqueue_completed(mc);
		sc->mly_busycmds[slot] = NULL;	/* free the slot (see mly_get_slot) */
		worked = 1;
	    } else {
		mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot);
	    }
	} else {
	    /* slot 0xffff may mean "extremely bogus command" */
	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
	}
	/* unconditionally acknowledge status */
	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    }

    /* pick up memory-mailbox commands */
    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
	/* drain every status slot whose flag byte is nonzero */
	for (;;) {
	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];

	    /* check for more status */
	    if (sp->mmbox.flag == 0)
		break;

	    /* get slot number */
	    slot = sp->status.command_id;
	    if (slot < MLY_SLOT_MAX) {
		mc = sc->mly_busycmds[slot];
		if (mc != NULL) {
		    mc->mc_status = sp->status.status;
		    mc->mc_sense = sp->status.sense_length;
		    mc->mc_resid = sp->status.residue;
		    mly_enqueue_completed(mc);
		    sc->mly_busycmds[slot] = NULL;
		    worked = 1;
		} else {
		    mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot);
		}
	    } else {
		/* slot 0xffff may mean "extremely bogus command" */
		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index);
	    }

	    /* clear and move to next slot */
	    sp->mmbox.flag = 0;
	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
	}
	/* acknowledge that we have collected status value(s) */
	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
    }

    splx(s);
    if (worked) {
#if __FreeBSD_version >= 500005
	/* defer completion processing to a software interrupt when possible */
	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
	else
#endif
	    mly_complete(sc, 0);
    }
}
1010
1011/********************************************************************************
1012 * Process completed commands
1013 */
static void
mly_complete(void *context, int pending)
{
    struct mly_softc	*sc = (struct mly_softc *)context;
    struct mly_command	*mc;
    void	        (* mc_complete)(struct mly_command *mc);

    /* 'pending' is unused; the signature matches the taskqueue callback convention */

    debug_called(2);

    /*
     * Spin pulling commands off the completed queue and processing them.
     */
    while ((mc = mly_dequeue_completed(sc)) != NULL) {

	/*
	 * Free controller resources, mark command complete.
	 *
	 * Note that as soon as we mark the command complete, it may be freed
	 * out from under us, so we need to save the mc_complete field in
	 * order to later avoid dereferencing mc.  (We would not expect to
	 * have a polling/sleeping consumer with mc_complete != NULL).
	 */
	mly_unmap_command(mc);
	mc_complete = mc->mc_complete;
	MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE);

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (mc_complete != NULL) {
	    mc_complete(mc);
	} else {
	    wakeup(mc);		/* partner of the tsleep() in mly_immediate_command */
	}
    }

    /*
     * We may have freed up controller resources which would allow us
     * to push more commands onto the controller, so we check here.
     */
    mly_startio(sc);

    /*
     * The controller may have updated the health status information,
     * so check for it here.
     *
     * Note that we only check for health status after a completed command.  It
     * might be wise to ping the controller occasionally if it's been idle for
     * a while just to check up on it.  While a filesystem is mounted, or I/O is
     * active this isn't really an issue.
     */
    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
    }
    /* start fetching any events we haven't consumed yet */
    if (sc->mly_event_counter != sc->mly_event_waiting)
	mly_fetch_event(sc);
}
1075
1076/********************************************************************************
1077 ********************************************************************************
1078                                                        Command Buffer Management
1079 ********************************************************************************
1080 ********************************************************************************/
1081
1082/********************************************************************************
1083 * Give a command a slot in our lookup table, so that we can recover it when
1084 * the controller returns the slot number.
1085 *
1086 * Slots are freed in mly_done().
1087 */
1088static int
1089mly_get_slot(struct mly_command *mc)
1090{
1091    struct mly_softc	*sc = mc->mc_sc;
1092    u_int16_t		slot;
1093    int			tries;
1094
1095    debug_called(3);
1096
1097    if (mc->mc_flags & MLY_CMD_SLOTTED)
1098	return(0);
1099
1100    /*
1101     * Optimisation for the controller-busy case - check to see whether
1102     * we are already over the limit and stop immediately.
1103     */
1104    if (sc->mly_busy_count >= sc->mly_max_commands)
1105	return(EBUSY);
1106
1107    /*
1108     * Scan forward from the last slot that we assigned looking for a free
1109     * slot.  Don't scan more than the maximum number of commands that we
1110     * support (we should never reach the limit here due to the optimisation
1111     * above)
1112     */
1113    slot = sc->mly_last_slot;
1114    for (tries = sc->mly_max_commands; tries > 0; tries--) {
1115	if (sc->mly_busycmds[slot] == NULL) {
1116	    sc->mly_busycmds[slot] = mc;
1117	    mc->mc_slot = slot;
1118	    mc->mc_packet->generic.command_id = slot;
1119	    mc->mc_flags |= MLY_CMD_SLOTTED;
1120	    sc->mly_last_slot = slot;
1121	    return(0);
1122	}
1123	slot++;
1124	if (slot >= MLY_SLOT_MAX)
1125	    slot = MLY_SLOT_START;
1126    }
1127    return(EBUSY);
1128}
1129
1130/********************************************************************************
1131 * Allocate a command.
1132 */
int
mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
{
    struct mly_command	*mc;

    debug_called(3);

    /* try the freelist; if it's empty, grow it by one cluster and retry once */
    if ((mc = mly_dequeue_free(sc)) == NULL) {
	mly_alloc_command_cluster(sc);
	mc = mly_dequeue_free(sc);
    }
    /*
     * NOTE(review): if mly_dequeue_free() already unlinks the command from
     * sc->mly_freecmds (as its name suggests), this TAILQ_REMOVE is a
     * double-removal and could corrupt the freelist -- verify against the
     * queue helpers in the driver header.
     */
    if (mc != NULL)
	TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link);

    if (mc == NULL)
	return(ENOMEM);

    MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
    *mcp = mc;
    return(0);
}
1154
1155/********************************************************************************
1156 * Release a command back to the freelist.
1157 */
1158void
1159mly_release_command(struct mly_command *mc)
1160{
1161    debug_called(3);
1162
1163    /*
1164     * Fill in parts of the command that may cause confusion if
1165     * a consumer doesn't when we are later allocated.
1166     */
1167    MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
1168    mc->mc_data = NULL;
1169    mc->mc_flags = 0;
1170    mc->mc_complete = NULL;
1171    mc->mc_private = NULL;
1172
1173    /*
1174     * By default, we set up to overwrite the command packet with
1175     * sense information.
1176     */
1177    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
1178    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
1179
1180    mly_enqueue_free(mc);
1181}
1182
1183/********************************************************************************
1184 * Map helper for command cluster allocation.
1185 *
1186 * Note that there are never more command packets in a cluster than will fit in
1187 * a page, so there is no need to look at anything other than the base of the
1188 * allocation (which will be page-aligned).
1189 */
1190static void
1191mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1192{
1193    struct mly_command_cluster	*mcc = (struct mly_command_cluster *)arg;
1194
1195    debug_called(2);
1196
1197    mcc->mcc_packetphys = segs[0].ds_addr;
1198}
1199
1200/********************************************************************************
1201 * Allocate and initialise a cluster of commands.
1202 */
static void
mly_alloc_command_cluster(struct mly_softc *sc)
{
    struct mly_command_cluster	*mcc;
    struct mly_command		*mc;
    int				i;

    debug_called(1);

    /* allocation is best-effort; on failure we simply end up with fewer commands */
    mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT);
    if (mcc != NULL) {

	/*
	 * Allocate enough space for all the command packets for this cluster and
	 * map them permanently into controller-visible space.
	 */
	if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet,
			     BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) {
	    free(mcc, M_DEVBUF);
	    return;
	}
	/*
	 * NOTE(review): the return value of bus_dmamap_load() is not checked;
	 * if the load were deferred, mcc_packetphys might not be valid when
	 * used below -- confirm the tag guarantees an immediate callback.
	 */
	bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet,
			MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet),
			mly_alloc_command_cluster_map, mcc, 0);

	mly_enqueue_cluster(sc, mcc);
	/* carve the cluster into commands; each goes on the freelist via mly_release_command */
	for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) {
	    mc = &mcc->mcc_command[i];
	    bzero(mc, sizeof(*mc));
	    mc->mc_sc = sc;
	    mc->mc_packet = mcc->mcc_packet + i;
	    mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet));
	    /* a command whose data map can't be created is quietly left unused */
	    if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
		mly_release_command(mc);
	}
    }
}
1240
1241/********************************************************************************
1242 * Command-mapping helper function - populate this command slot's s/g table
1243 * with the s/g entries for this command.
1244 */
static void
mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command		*mc = (struct mly_command *)arg;
    struct mly_softc		*sc = mc->mc_sc;
    struct mly_command_generic	*gen = &(mc->mc_packet->generic);
    struct mly_sg_entry		*sg;
    int				i, tabofs;

    debug_called(3);

    /*
     * Can we use the transfer structure directly?  The packet has room
     * for two inline s/g entries; anything larger goes to the external
     * per-slot table.
     */
    if (nseg <= 2) {
	sg = &gen->transfer.direct.sg[0];
	gen->command_control.extended_sg_table = 0;
    } else {
	/* each slot owns a fixed MLY_MAXSGENTRIES-sized region of the shared table */
	tabofs = (mc->mc_slot * MLY_MAXSGENTRIES);
	sg = sc->mly_sg_table + tabofs;
	gen->transfer.indirect.entries[0] = nseg;
	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
	gen->command_control.extended_sg_table = 1;
    }

    /* copy the s/g table */
    for (i = 0; i < nseg; i++) {
	sg[i].physaddr = segs[i].ds_addr;
	sg[i].length = segs[i].ds_len;
    }

}
1275
1276#if 0
1277/********************************************************************************
1278 * Command-mapping helper function - save the cdb's physical address.
1279 *
1280 * We don't support 'large' SCSI commands at this time, so this is unused.
1281 */
static void
mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mly_command			*mc = (struct mly_command *)arg;

    debug_called(3);

    /* XXX can we safely assume that a CDB will never cross a page boundary? */
    /* if the CDB's end lands on an earlier page offset than its start, it crossed a page */
    if ((segs[0].ds_addr % PAGE_SIZE) >
	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
	panic("cdb crosses page boundary");

    /* fix up fields in the command packet */
    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
}
1297#endif
1298
1299/********************************************************************************
1300 * Map a command into controller-visible space
1301 */
static void
mly_map_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    debug_called(2);

    /* don't map more than once */
    if (mc->mc_flags & MLY_CMD_MAPPED)
	return;

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL)
	bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
			mly_map_command_sg, mc, 0);

    /* pre-transfer sync for whichever direction(s) this command uses */
    if (mc->mc_flags & MLY_CMD_DATAIN)
	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
    if (mc->mc_flags & MLY_CMD_DATAOUT)
	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);

    mc->mc_flags |= MLY_CMD_MAPPED;
}
1325
1326/********************************************************************************
1327 * Unmap a command from controller-visible space
1328 */
static void
mly_unmap_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    debug_called(2);

    /* nothing to undo if the command was never mapped */
    if (!(mc->mc_flags & MLY_CMD_MAPPED))
	return;

    /* post-transfer sync must precede the unload below */
    if (mc->mc_flags & MLY_CMD_DATAIN)
	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
    if (mc->mc_flags & MLY_CMD_DATAOUT)
	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);

    /* does the command have a data buffer? */
    if (mc->mc_data != NULL)
	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);

    mc->mc_flags &= ~MLY_CMD_MAPPED;
}
1350
1351/********************************************************************************
1352 ********************************************************************************
1353                                                                 Hardware Control
1354 ********************************************************************************
1355 ********************************************************************************/
1356
1357/********************************************************************************
1358 * Handshake with the firmware while the card is being initialised.
1359 */
static int
mly_fwhandshake(struct mly_softc *sc)
{
    u_int8_t	error, param0, param1;
    int		spinup = 0;

    debug_called(1);

    /* set HM_STSACK and let the firmware initialise */
    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
    DELAY(1000);	/* too short? */

    /* if HM_STSACK is still true, the controller is initialising */
    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
	return(0);
    mly_printf(sc, "controller initialisation started\n");

    /*
     * Spin waiting for initialisation to finish, or for a message to be
     * delivered.
     * NOTE(review): there is no timeout here -- if the firmware never clears
     * HM_STSACK and never posts a fatal message, this loop spins forever.
     */
    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
	/* check for a message */
	if (MLY_ERROR_VALID(sc)) {
	    /* message code is in the error-status register; parameters in the mailbox */
	    error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
	    param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
	    param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);

	    switch(error) {
	    case MLY_MSG_SPINUP:
		if (!spinup) {
		    mly_printf(sc, "drive spinup in progress\n");
		    spinup = 1;			/* only print this once (should print drive being spun?) */
		}
		break;
	    case MLY_MSG_RACE_RECOVERY_FAIL:
		mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
		break;
	    case MLY_MSG_RACE_IN_PROGRESS:
		mly_printf(sc, "mirror race recovery in progress\n");
		break;
	    case MLY_MSG_RACE_ON_CRITICAL:
		mly_printf(sc, "mirror race recovery on a critical drive\n");
		break;
	    case MLY_MSG_PARITY_ERROR:
		/* unrecoverable; give up on the controller */
		mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
		return(ENXIO);
	    default:
		mly_printf(sc, "unknown initialisation code 0x%x\n", error);
	    }
	}
    }
    return(0);
}
1411
1412/********************************************************************************
1413 ********************************************************************************
1414                                                        Debugging and Diagnostics
1415 ********************************************************************************
1416 ********************************************************************************/
1417
1418/********************************************************************************
1419 * Print some information about the controller.
1420 */
static void
mly_describe_controller(struct mly_softc *sc)
{
    struct mly_ioctl_getcontrollerinfo	*mi = sc->mly_controllerinfo;

    /* one-line summary, always printed */
    mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
	       mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
	       mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,	/* XXX turn encoding? */
	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	       mi->memory_size);

    /* full hardware inventory, only with 'boot -v' */
    if (bootverbose) {
	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
		   mly_describe_code(mly_table_oemname, mi->oem_information),
		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
		   mi->interface_speed, mi->interface_width, mi->interface_name);
	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
		   mi->memory_size, mi->memory_speed, mi->memory_width,
		   mly_describe_code(mly_table_memorytype, mi->memory_type),
		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
		   mi->cache_size);
	mly_printf(sc, "CPU: %s @ %dMHZ\n",
		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
	if (mi->l2cache_size != 0)
	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
	if (mi->exmemory_size != 0)
	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
		   mi->maximum_block_count, mi->maximum_sg_entries);
	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
	mly_printf(sc, "physical devices present %d\n",
		   mi->physical_devices_present);
	mly_printf(sc, "physical disks present/offline %d/%d\n",
		   mi->physical_disks_present, mi->physical_disks_offline);
	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
		   mi->virtual_channels_possible);
	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
    }
}
1469
1470#ifdef MLY_DEBUG
1471/********************************************************************************
1472 * Print some controller state
1473 */
static void
mly_printstate(struct mly_softc *sc)
{
    /* dump the doorbell/error registers, both raw values and their configured offsets */
    mly_printf(sc, "IDBR %02x  ODBR %02x  ERROR %02x  (%x %x %x)\n",
		  MLY_GET_REG(sc, sc->mly_idbr),
		  MLY_GET_REG(sc, sc->mly_odbr),
		  MLY_GET_REG(sc, sc->mly_error_status),
		  sc->mly_idbr,
		  sc->mly_odbr,
		  sc->mly_error_status);
    mly_printf(sc, "IMASK %02x  ISTATUS %02x\n",
		  MLY_GET_REG(sc, sc->mly_interrupt_mask),
		  MLY_GET_REG(sc, sc->mly_interrupt_status));
    /* dump both hardware mailboxes byte-by-byte */
    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
		  MLY_GET_REG(sc, sc->mly_command_mailbox),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
		  MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
    mly_printf(sc, "STATUS  %02x %02x %02x %02x %02x %02x %02x %02x\n",
		  MLY_GET_REG(sc, sc->mly_status_mailbox),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
		  MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
    /* status mailbox again, as the 16-bit slot and 32-bit residue fields */
    mly_printf(sc, "        %04x        %08x\n",
		  MLY_GET_REG2(sc, sc->mly_status_mailbox),
		  MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
}
1509
1510struct mly_softc	*mly_softc0 = NULL;
1511void
1512mly_printstate0(void)
1513{
1514    if (mly_softc0 != NULL)
1515	mly_printstate(mly_softc0);
1516}
1517
1518/********************************************************************************
1519 * Print a command
1520 */
static void
mly_print_command(struct mly_command *mc)
{
    struct mly_softc	*sc = mc->mc_sc;

    /* dump every field of the command, then (if present) the packet itself */
    mly_printf(sc, "COMMAND @ %p\n", mc);
    mly_printf(sc, "  slot      %d\n", mc->mc_slot);
    mly_printf(sc, "  state     %d\n", MLY_CMD_STATE(mc));
    mly_printf(sc, "  status    0x%x\n", mc->mc_status);
    mly_printf(sc, "  sense len %d\n", mc->mc_sense);
    mly_printf(sc, "  resid     %d\n", mc->mc_resid);
    /* NOTE(review): %llx assumes mc_packetphys is 64-bit -- confirm its type */
    mly_printf(sc, "  packet    %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
    if (mc->mc_packet != NULL)
	mly_print_packet(mc);
    mly_printf(sc, "  data      %p/%d\n", mc->mc_data, mc->mc_length);
    mly_printf(sc, "  flags     %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n");
    mly_printf(sc, "  complete  %p\n", mc->mc_complete);
    mly_printf(sc, "  private   %p\n", mc->mc_private);
}
1540
/********************************************************************************
 * Print a command packet
 *
 * Decode and dump the hardware command packet attached to (mc).  The packet
 * is viewed through several overlapping struct pointers (generic, small/large
 * SCSI, ioctl); which fields are valid depends on ge->opcode, so the generic
 * header is printed first and the opcode-specific payload afterwards.
 * (transfer) is set by each branch to indicate whether the packet carries a
 * data transfer whose scatter/gather description should be printed at the end.
 */
static void
mly_print_packet(struct mly_command *mc)
{
    struct mly_softc			*sc = mc->mc_sc;
    /* all four views alias the same packet buffer */
    struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
    struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
    struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
    struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
    int					transfer;

    /* fields common to every command format */
    mly_printf(sc, "   command_id           %d\n", ge->command_id);
    mly_printf(sc, "   opcode               %d\n", ge->opcode);
    mly_printf(sc, "   command_control      fua %d  dpo %d  est %d  dd %s  nas %d ddis %d\n",
		  ge->command_control.force_unit_access,
		  ge->command_control.disable_page_out,
		  ge->command_control.extended_sg_table,
		  (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
		  ge->command_control.no_auto_sense,
		  ge->command_control.disable_disconnect);
    mly_printf(sc, "   data_size            %d\n", ge->data_size);
    mly_printf(sc, "   sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
    /* the address union is printed both as a physical and a logical target */
    mly_printf(sc, "   lun                  %d\n", ge->addr.phys.lun);
    mly_printf(sc, "   target               %d\n", ge->addr.phys.target);
    mly_printf(sc, "   channel              %d\n", ge->addr.phys.channel);
    mly_printf(sc, "   logical device       %d\n", ge->addr.log.logdev);
    mly_printf(sc, "   controller           %d\n", ge->addr.phys.controller);
    mly_printf(sc, "   timeout              %d %s\n",
		  ge->timeout.value,
		  (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
		  ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
    mly_printf(sc, "   maximum_sense_size   %d\n", ge->maximum_sense_size);
    /* opcode-specific payload */
    switch(ge->opcode) {
    case MDACMD_SCSIPT:
    case MDACMD_SCSI:
	/* small SCSI command: CDB is embedded in the packet (%*D hex-dumps it) */
	mly_printf(sc, "   cdb length           %d\n", ss->cdb_length);
	mly_printf(sc, "   cdb                  %*D\n", ss->cdb_length, ss->cdb, " ");
	transfer = 1;
	break;
    case MDACMD_SCSILC:
    case MDACMD_SCSILCPT:
	/* large SCSI command: CDB referenced by physical address */
	mly_printf(sc, "   cdb length           %d\n", sl->cdb_length);
	mly_printf(sc, "   cdb                  0x%llx\n", sl->cdb_physaddr);
	transfer = 1;
	break;
    case MDACMD_IOCTL:
	mly_printf(sc, "   sub_ioctl            0x%x\n", io->sub_ioctl);
	switch(io->sub_ioctl) {
	case MDACIOCTL_SETMEMORYMAILBOX:
	    mly_printf(sc, "   health_buffer_size   %d\n",
			  io->param.setmemorymailbox.health_buffer_size);
	    mly_printf(sc, "   health_buffer_phys   0x%llx\n",
			  io->param.setmemorymailbox.health_buffer_physaddr);
	    mly_printf(sc, "   command_mailbox      0x%llx\n",
			  io->param.setmemorymailbox.command_mailbox_physaddr);
	    mly_printf(sc, "   status_mailbox       0x%llx\n",
			  io->param.setmemorymailbox.status_mailbox_physaddr);
	    transfer = 0;
	    break;

	/* ioctls whose parameters are just an opaque byte array */
	case MDACIOCTL_SETREALTIMECLOCK:
	case MDACIOCTL_GETHEALTHSTATUS:
	case MDACIOCTL_GETCONTROLLERINFO:
	case MDACIOCTL_GETLOGDEVINFOVALID:
	case MDACIOCTL_GETPHYSDEVINFOVALID:
	case MDACIOCTL_GETPHYSDEVSTATISTICS:
	case MDACIOCTL_GETLOGDEVSTATISTICS:
	case MDACIOCTL_GETCONTROLLERSTATISTICS:
	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
	case MDACIOCTL_CREATENEWCONF:
	case MDACIOCTL_ADDNEWCONF:
	case MDACIOCTL_GETDEVCONFINFO:
	case MDACIOCTL_GETFREESPACELIST:
	case MDACIOCTL_MORE:
	case MDACIOCTL_SETPHYSDEVPARAMETER:
	case MDACIOCTL_GETPHYSDEVPARAMETER:
	case MDACIOCTL_GETLOGDEVPARAMETER:
	case MDACIOCTL_SETLOGDEVPARAMETER:
	    mly_printf(sc, "   param                %10D\n", io->param.data.param, " ");
	    transfer = 1;
	    break;

	case MDACIOCTL_GETEVENT:
	    /* sequence number is split: low 16 bits in the param, high in logdev */
	    mly_printf(sc, "   event                %d\n",
		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
	    transfer = 1;
	    break;

	case MDACIOCTL_SETRAIDDEVSTATE:
	    mly_printf(sc, "   state                %d\n", io->param.setraiddevstate.state);
	    transfer = 0;
	    break;

	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
	    mly_printf(sc, "   raid_device          %d\n", io->param.xlatephysdevtoraiddev.raid_device);
	    mly_printf(sc, "   controller           %d\n", io->param.xlatephysdevtoraiddev.controller);
	    mly_printf(sc, "   channel              %d\n", io->param.xlatephysdevtoraiddev.channel);
	    mly_printf(sc, "   target               %d\n", io->param.xlatephysdevtoraiddev.target);
	    mly_printf(sc, "   lun                  %d\n", io->param.xlatephysdevtoraiddev.lun);
	    transfer = 0;
	    break;

	case MDACIOCTL_GETGROUPCONFINFO:
	    mly_printf(sc, "   group                %d\n", io->param.getgroupconfinfo.group);
	    transfer = 1;
	    break;

	/* ioctls with no parameters worth decoding here */
	case MDACIOCTL_GET_SUBSYSTEM_DATA:
	case MDACIOCTL_SET_SUBSYSTEM_DATA:
	case MDACIOCTL_STARTDISOCVERY:
	case MDACIOCTL_INITPHYSDEVSTART:
	case MDACIOCTL_INITPHYSDEVSTOP:
	case MDACIOCTL_INITRAIDDEVSTART:
	case MDACIOCTL_INITRAIDDEVSTOP:
	case MDACIOCTL_REBUILDRAIDDEVSTART:
	case MDACIOCTL_REBUILDRAIDDEVSTOP:
	case MDACIOCTL_MAKECONSISTENTDATASTART:
	case MDACIOCTL_MAKECONSISTENTDATASTOP:
	case MDACIOCTL_CONSISTENCYCHECKSTART:
	case MDACIOCTL_CONSISTENCYCHECKSTOP:
	case MDACIOCTL_RESETDEVICE:
	case MDACIOCTL_FLUSHDEVICEDATA:
	case MDACIOCTL_PAUSEDEVICE:
	case MDACIOCTL_UNPAUSEDEVICE:
	case MDACIOCTL_LOCATEDEVICE:
	case MDACIOCTL_SETMASTERSLAVEMODE:
	case MDACIOCTL_DELETERAIDDEV:
	case MDACIOCTL_REPLACEINTERNALDEV:
	case MDACIOCTL_CLEARCONF:
	case MDACIOCTL_GETCONTROLLERPARAMETER:
	case MDACIOCTL_SETCONTRLLERPARAMETER:
	case MDACIOCTL_CLEARCONFSUSPMODE:
	case MDACIOCTL_STOREIMAGE:
	case MDACIOCTL_READIMAGE:
	case MDACIOCTL_FLASHIMAGES:
	case MDACIOCTL_RENAMERAIDDEV:
	default:			/* no idea what to print */
	    transfer = 0;
	    break;
	}
	break;

    case MDACMD_IOCTLCHECK:
    case MDACMD_MEMCOPY:
    default:
	transfer = 0;
	break;	/* print nothing */
    }
    if (transfer) {
	/* dump the scatter/gather description of the data transfer */
	if (ge->command_control.extended_sg_table) {
	    mly_printf(sc, "   sg table             0x%llx/%d\n",
			  ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
	} else {
	    mly_printf(sc, "   0000                 0x%llx/%lld\n",
			  ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
	    mly_printf(sc, "   0001                 0x%llx/%lld\n",
			  ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
	}
    }
}
1703
/********************************************************************************
 * Panic in a slightly informative fashion
 *
 * Dump the controller hardware state (registers and mailboxes) before
 * panicking, so the hardware context is visible in the panic output.
 */
static void
mly_panic(struct mly_softc *sc, char *reason)
{
    mly_printstate(sc);
    /*
     * panic() takes a printf-style format string; pass the caller-supplied
     * reason as an argument rather than as the format itself, so any '%'
     * characters in it cannot be misinterpreted as conversions.
     */
    panic("%s", reason);
}
1713#endif
1714