1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *	$FreeBSD: head/sys/dev/mly/mly.c 64987 2000-08-23 03:22:41Z msmith $
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/ctype.h>
37
38#include <machine/bus_memio.h>
39#include <machine/bus.h>
40#include <machine/clock.h>
41#include <machine/resource.h>
42#include <sys/rman.h>
43
44#include <cam/scsi/scsi_all.h>
45
46#include <dev/mly/mlyreg.h>
47#include <dev/mly/mlyvar.h>
48#define MLY_DEFINE_TABLES
49#include <dev/mly/mly_tables.h>
50
51static int	mly_get_controllerinfo(struct mly_softc *sc);
52static void	mly_scan_devices(struct mly_softc *sc);
53static void	mly_rescan_btl(struct mly_softc *sc, int bus, int target);
54static void	mly_complete_rescan(struct mly_command *mc);
55static int	mly_get_eventstatus(struct mly_softc *sc);
56static int	mly_enable_mmbox(struct mly_softc *sc);
57static int	mly_flush(struct mly_softc *sc);
58static int	mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
59			  size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
60static void	mly_fetch_event(struct mly_softc *sc);
61static void	mly_complete_event(struct mly_command *mc);
62static void	mly_process_event(struct mly_softc *sc, struct mly_event *me);
63static void	mly_periodic(void *data);
64
65static int	mly_immediate_command(struct mly_command *mc);
66static int	mly_start(struct mly_command *mc);
67static void	mly_complete(void *context, int pending);
68
69static int	mly_get_slot(struct mly_command *mc);
70static void	mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
71static void	mly_alloc_command_cluster(struct mly_softc *sc);
72static void	mly_map_command(struct mly_command *mc);
73static void	mly_unmap_command(struct mly_command *mc);
74
75static int	mly_fwhandshake(struct mly_softc *sc);
76
77static void	mly_describe_controller(struct mly_softc *sc);
78#ifdef MLY_DEBUG
79static void	mly_printstate(struct mly_softc *sc);
80static void	mly_print_command(struct mly_command *mc);
81static void	mly_print_packet(struct mly_command *mc);
82static void	mly_panic(struct mly_softc *sc, char *reason);
83#endif
84
85/********************************************************************************
86 ********************************************************************************
87                                                                 Device Interface
88 ********************************************************************************
89 ********************************************************************************/
90
91/********************************************************************************
92 * Initialise the controller and softc
93 */
94int
95mly_attach(struct mly_softc *sc)
96{
97    int		error;
98
99    debug_called(1);
100
101    /*
102     * Initialise per-controller queues.
103     */
104    TAILQ_INIT(&sc->mly_freecmds);
105    TAILQ_INIT(&sc->mly_ready);
106    TAILQ_INIT(&sc->mly_completed);
107    TAILQ_INIT(&sc->mly_clusters);
108
109#if __FreeBSD_version >= 500005
110    /*
111     * Initialise command-completion task.
112     */
113    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
114#endif
115
116    /* disable interrupts before we start talking to the controller */
117    MLY_MASK_INTERRUPTS(sc);
118
119    /*
120     * Wait for the controller to come ready, handshake with the firmware if required.
121     * This is typically only necessary on platforms where the controller BIOS does not
122     * run.
123     */
124    if ((error = mly_fwhandshake(sc)))
125	return(error);
126
127    /*
128     * Initialise the slot allocator so that we can issue commands.
129     */
130    sc->mly_max_commands = MLY_SLOT_MAX;
131    sc->mly_last_slot = MLY_SLOT_START;
132
133    /*
134     * Obtain controller feature information
135     */
136    if ((error = mly_get_controllerinfo(sc)))
137	return(error);
138
139    /*
140     * Update the slot allocator limit based on the controller inquiry.
141     */
142    sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX);
143
144    /*
145     * Get the current event counter for health purposes, populate the initial
146     * health status buffer.
147     */
148    if ((error = mly_get_eventstatus(sc)))
149	return(error);
150
151    /*
152     * Enable memory-mailbox mode
153     */
154    if ((error = mly_enable_mmbox(sc)))
155	return(error);
156
157    /*
158     * Attach to CAM.
159     */
160    if ((error = mly_cam_attach(sc)))
161	return(error);
162
163    /*
164     * Print a little information about the controller
165     */
166    mly_describe_controller(sc);
167
168    /*
169     * Mark all attached devices for rescan
170     */
171    mly_scan_devices(sc);
172
173    /*
174     * Instigate the first status poll immediately.  Rescan completions won't
175     * happen until interrupts are enabled, which should still be before
176     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
177     * discovery here...)
178     */
179    mly_periodic((void *)sc);
180
181    /* enable interrupts now */
182    MLY_UNMASK_INTERRUPTS(sc);
183
184    return(0);
185}
186
187/********************************************************************************
188 * Bring the controller to a state where it can be safely left alone.
189 */
190void
191mly_detach(struct mly_softc *sc)
192{
193
194    debug_called(1);
195
196    /* kill the periodic event */
197    untimeout(mly_periodic, sc, sc->mly_periodic);
198
199    sc->mly_state |= MLY_STATE_SUSPEND;
200
201    /* flush controller */
202    mly_printf(sc, "flushing cache...");
203    printf("%s\n", mly_flush(sc) ? "failed" : "done");
204
205    MLY_MASK_INTERRUPTS(sc);
206}
207
208/********************************************************************************
209 ********************************************************************************
210                                                                 Command Wrappers
211 ********************************************************************************
212 ********************************************************************************/
213
214/********************************************************************************
215 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
216 */
217static int
218mly_get_controllerinfo(struct mly_softc *sc)
219{
220    struct mly_command_ioctl	mci;
221    u_int8_t			status;
222    int				error;
223
224    debug_called(1);
225
226    if (sc->mly_controllerinfo != NULL)
227	free(sc->mly_controllerinfo, M_DEVBUF);
228
229    /* build the getcontrollerinfo ioctl and send it */
230    bzero(&mci, sizeof(mci));
231    sc->mly_controllerinfo = NULL;
232    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
233    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
234			   &status, NULL, NULL)))
235	return(error);
236    if (status != 0)
237	return(EIO);
238
239    if (sc->mly_controllerparam != NULL)
240	free(sc->mly_controllerparam, M_DEVBUF);
241
242    /* build the getcontrollerparameter ioctl and send it */
243    bzero(&mci, sizeof(mci));
244    sc->mly_controllerparam = NULL;
245    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
246    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
247			   &status, NULL, NULL)))
248	return(error);
249    if (status != 0)
250	return(EIO);
251
252    return(0);
253}
254
255/********************************************************************************
256 * Schedule all possible devices for a rescan.
257 *
258 */
259static void
260mly_scan_devices(struct mly_softc *sc)
261{
262    int		bus, target, nchn;
263
264    debug_called(1);
265
266    /*
267     * Clear any previous BTL information.
268     */
269    bzero(&sc->mly_btl, sizeof(sc->mly_btl));
270
271    /*
272     * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
273     */
274    nchn = sc->mly_controllerinfo->physical_channels_present +
275	sc->mly_controllerinfo->virtual_channels_present;
276    for (bus = 0; bus < nchn; bus++)
277	for (target = 0; target < MLY_MAX_TARGETS; target++)
278	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
279
280}
281
282/********************************************************************************
283 * Rescan a device, possibly as a consequence of getting an event which suggests
284 * that it may have changed.
285 */
286static void
287mly_rescan_btl(struct mly_softc *sc, int bus, int target)
288{
289    struct mly_command		*mc;
290    struct mly_command_ioctl	*mci;
291
292    debug_called(2);
293
294    /* get a command */
295    mc = NULL;
296    if (mly_alloc_command(sc, &mc))
297	return;				/* we'll be retried soon */
298
299    /* set up the data buffer */
300    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT)) == NULL) {
301	mly_release_command(mc);
302	return;				/* we'll get retried the next time a command completes */
303    }
304    bzero(mc->mc_data, sizeof(union mly_devinfo));
305    mc->mc_flags |= MLY_CMD_DATAIN;
306    mc->mc_complete = mly_complete_rescan;
307
308    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
309
310    /*
311     * Build the ioctl.
312     *
313     * At this point we are committed to sending this request, as it
314     * will be the only one constructed for this particular update.
315     */
316    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
317    mci->opcode = MDACMD_IOCTL;
318    mci->addr.phys.controller = 0;
319    mci->timeout.value = 30;
320    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
321    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
322	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
323	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
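	/* logical devices are numbered linearly across the virtual channels
	 * (MLY_MAX_TARGETS per channel); presumably the inverse of the
	 * MLY_LOGDEV_BUS/TARGET mapping used in mly_complete_rescan() */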
324	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
325	    + target;
326	debug(2, "logical device %d", mci->addr.log.logdev);
327    } else {
328	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
329	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
330	mci->addr.phys.lun = 0;
331	mci->addr.phys.target = target;
332	mci->addr.phys.channel = bus;
333	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
334    }
335
336    /*
337     * Use the ready queue to get this command dispatched.
338     */
339    mly_enqueue_ready(mc);
340    mly_startio(sc);
341}
342
343/********************************************************************************
344 * Handle the completion of a rescan operation
345 */
346static void
347mly_complete_rescan(struct mly_command *mc)
348{
349    struct mly_softc				*sc = mc->mc_sc;
350    struct mly_ioctl_getlogdevinfovalid		*ldi;
351    struct mly_ioctl_getphysdevinfovalid	*pdi;
352    int						bus, target;
353
354    debug_called(2);
355
356    /* iff the command completed OK, we should use the result to update our data */
357    if (mc->mc_status == 0) {
358	if (mc->mc_length == sizeof(*ldi)) {
359	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
360	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
361	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
362	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
363	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
364	    sc->mly_btl[bus][target].mb_state = ldi->state;
365	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
366		  mly_describe_code(mly_table_device_type, ldi->raid_level),
367		  mly_describe_code(mly_table_device_state, ldi->state));
368	} else if (mc->mc_length == sizeof(*pdi)) {
369	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
370	    bus = pdi->channel;
371	    target = pdi->target;
372	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
373	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
374	    sc->mly_btl[bus][target].mb_state = pdi->state;
375	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
376		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
377	    debug(2, "BTL rescan for %d:%d returns %s", bus, target,
378		  mly_describe_code(mly_table_device_state, pdi->state));
379	} else {
380	    mly_printf(sc, "BTL rescan result corrupted\n");
381	}
382    } else {
383	/*
384	 * A request sent for a device beyond the last device present will fail.
385	 * We don't care about this, so we do nothing about it.
386	 */
387    }
388    free(mc->mc_data, M_DEVBUF);
389    mly_release_command(mc);
390}
391
392/********************************************************************************
393 * Get the current health status and set the 'next event' counter to suit.
394 */
395static int
396mly_get_eventstatus(struct mly_softc *sc)
397{
398    struct mly_command_ioctl	mci;
399    struct mly_health_status	*mh;
400    u_int8_t			status;
401    int				error;
402
403    /* build the gethealthstatus ioctl and send it */
404    bzero(&mci, sizeof(mci));
405    mh = NULL;
406    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
407
408    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
409	return(error);
410    if (status != 0)
411	return(EIO);
412
413    /* get the event counter */
414    sc->mly_event_change = mh->change_counter;
415    sc->mly_event_waiting = mh->next_event;
416    sc->mly_event_counter = mh->next_event;
417
418    /* save the health status into the memory mailbox */
419    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
420
421    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
422
423    free(mh, M_DEVBUF);
424    return(0);
425}
426
427/********************************************************************************
428 * Enable the memory mailbox mode.
429 */
430static int
431mly_enable_mmbox(struct mly_softc *sc)
432{
433    struct mly_command_ioctl	mci;
434    u_int8_t			*sp, status;
435    int				error;
436
437    debug_called(1);
438
439    /* build the ioctl and send it */
440    bzero(&mci, sizeof(mci));
441    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
442    /* set buffer addresses */
443    mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_command);
444    mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_status);
445    mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_health);
446
447    /* set buffer sizes - abuse of data_size field is revolting */
448    sp = (u_int8_t *)&mci.data_size;
449    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
450    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
451    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
452
    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d)", sc->mly_mmbox,
454	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
455	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
456	  mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size);
457
458    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
459	return(error);
460    if (status != 0)
461	return(EIO);
462    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
463    debug(1, "memory mailbox active");
464    return(0);
465}
466
467/********************************************************************************
468 * Flush all pending I/O from the controller.
469 */
470static int
471mly_flush(struct mly_softc *sc)
472{
473    struct mly_command_ioctl	mci;
474    u_int8_t			status;
475    int				error;
476
477    debug_called(1);
478
479    /* build the ioctl */
480    bzero(&mci, sizeof(mci));
481    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
482    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
483
484    /* pass it off to the controller */
485    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
486	return(error);
487
488    return((status == 0) ? 0 : EIO);
489}
490
491/********************************************************************************
492 * Perform an ioctl command.
493 *
494 * If (data) is not NULL, the command requires data transfer.  If (*data) is NULL
495 * the command requires data transfer from the controller, and we will allocate
496 * a buffer for it.  If (*data) is not NULL, the command requires data transfer
497 * to the controller.
498 *
499 * XXX passing in the whole ioctl structure is ugly.  Better ideas?
500 *
501 * XXX we don't even try to handle the case where datasize > 4k.  We should.
502 */
503static int
504mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
505	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
506{
507    struct mly_command		*mc;
508    struct mly_command_ioctl	*mci;
509    int				error;
510
511    debug_called(1);
512
513    mc = NULL;
514    if (mly_alloc_command(sc, &mc)) {
515	error = ENOMEM;
516	goto out;
517    }
518
    /* copy the ioctl structure into the packet, preserving the command's pre-set
     * sense buffer address/size by saving them into the caller's copy first,
     * then fix up the header fields */
520    mci = &mc->mc_packet->ioctl;
521    ioctl->sense_buffer_address = mci->sense_buffer_address;
522    ioctl->maximum_sense_size = mci->maximum_sense_size;
523    *mci = *ioctl;
524    mci->opcode = MDACMD_IOCTL;
525    mci->timeout.value = 30;
526    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
527
528    /* handle the data buffer */
529    if (data != NULL) {
530	if (*data == NULL) {
531	    /* allocate data buffer */
532	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
533		error = ENOMEM;
534		goto out;
535	    }
536	    mc->mc_flags |= MLY_CMD_DATAIN;
537	} else {
538	    mc->mc_data = *data;
539	    mc->mc_flags |= MLY_CMD_DATAOUT;
540	}
541	mc->mc_length = datasize;
542	mc->mc_packet->generic.data_size = datasize;
543    }
544
545    /* run the command */
546    if ((error = mly_immediate_command(mc)))
547	goto out;
548
549    /* clean up and return any data */
550    *status = mc->mc_status;
551    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
552	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
553	*sense_length = mc->mc_sense;
554	goto out;
555    }
556
557    /* should we return a data pointer? */
558    if ((data != NULL) && (*data == NULL))
559	*data = mc->mc_data;
560
561    /* command completed OK */
562    error = 0;
563
564out:
565    if (mc != NULL) {
566	/* do we need to free a data buffer we allocated? */
567	if (error && (mc->mc_data != NULL) && (*data == NULL))
568	    free(mc->mc_data, M_DEVBUF);
569	mly_release_command(mc);
570    }
571    return(error);
572}
573
574/********************************************************************************
575 * Fetch one event from the controller.
576 */
577static void
578mly_fetch_event(struct mly_softc *sc)
579{
580    struct mly_command		*mc;
581    struct mly_command_ioctl	*mci;
582    int				s;
583    u_int32_t			event;
584
585    debug_called(2);
586
587    /* get a command */
588    mc = NULL;
589    if (mly_alloc_command(sc, &mc))
590	return;				/* we'll get retried the next time a command completes */
591
592    /* set up the data buffer */
593    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT)) == NULL) {
594	mly_release_command(mc);
595	return;				/* we'll get retried the next time a command completes */
596    }
597    bzero(mc->mc_data, sizeof(struct mly_event));
598    mc->mc_length = sizeof(struct mly_event);
599    mc->mc_flags |= MLY_CMD_DATAIN;
600    mc->mc_complete = mly_complete_event;
601
602    /*
603     * Get an event number to fetch.  It's possible that we've raced with another
604     * context for the last event, in which case there will be no more events.
605     */
606    s = splcam();
607    if (sc->mly_event_counter == sc->mly_event_waiting) {
608	mly_release_command(mc);
609	splx(s);
610	return;
611    }
612    event = sc->mly_event_counter++;
613    splx(s);
614
615    /*
616     * Build the ioctl.
617     *
618     * At this point we are committed to sending this request, as it
619     * will be the only one constructed for this particular event number.
620     */
621    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
622    mci->opcode = MDACMD_IOCTL;
623    mci->data_size = sizeof(struct mly_event);
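    /* the 32-bit event sequence number is split: the low 16 bits go in the
     * getevent parameter below, the upper two bytes are packed into the
     * lun/target address fields (mly_print_packet() reassembles it the same way) */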
624    mci->addr.phys.lun = (event >> 16) & 0xff;
625    mci->addr.phys.target = (event >> 24) & 0xff;
626    mci->addr.phys.channel = 0;
627    mci->addr.phys.controller = 0;
628    mci->timeout.value = 30;
629    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
630    mci->sub_ioctl = MDACIOCTL_GETEVENT;
631    mci->param.getevent.sequence_number_low = event & 0xffff;
632
633    debug(2, "fetch event %u", event);
634
635    /*
636     * Use the ready queue to get this command dispatched.
637     */
638    mly_enqueue_ready(mc);
639    mly_startio(sc);
640}
641
642/********************************************************************************
643 * Handle the completion of an event poll.
644 *
645 * Note that we don't actually have to instigate another poll; the completion of
646 * this command will trigger that if there are any more events to poll for.
647 */
648static void
649mly_complete_event(struct mly_command *mc)
650{
651    struct mly_softc	*sc = mc->mc_sc;
652    struct mly_event	*me = (struct mly_event *)mc->mc_data;
653
654    debug_called(2);
655
656    /*
657     * If the event was successfully fetched, process it.
658     */
    if (mc->mc_status == SCSI_STATUS_OK)
	mly_process_event(sc, me);
    free(me, M_DEVBUF);		/* free the buffer even if the fetch failed */
663    mly_release_command(mc);
664}
665
666/********************************************************************************
667 * Process a controller event.
668 */
669static void
670mly_process_event(struct mly_softc *sc, struct mly_event *me)
671{
672    struct scsi_sense_data	*ssd = (struct scsi_sense_data *)&me->sense[0];
673    char			*fp, *tp;
674    int				bus, target, event, class, action;
675
676    /*
677     * Errors can be reported using vendor-unique sense data.  In this case, the
678     * event code will be 0x1c (Request sense data present), the sense key will
679     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
680     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
681     * and low seven bits of the ASC (low seven bits of the high byte).
682     */
683    if ((me->code == 0x1c) &&
684	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
685	(ssd->add_sense_code & 0x80)) {
686	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
687    } else {
688	event = me->code;
689    }
690
691    /* look up event, get codes */
692    fp = mly_describe_code(mly_table_event, event);
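    /* each event table entry is encoded as <class char><action char><message text> */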
693
694    debug(2, "Event %d  code 0x%x", me->sequence_number, me->code);
695
696    /* quiet event? */
697    class = fp[0];
698    if (isupper(class) && bootverbose)
699	class = tolower(class);
700
701    /* get action code, text string */
702    action = fp[1];
703    tp = &fp[2];
704
705    /*
706     * Print some information about the event.
707     *
708     * This code uses a table derived from the corresponding portion of the Linux
709     * driver, and thus the parser is very similar.
710     */
711    switch(class) {
712    case 'p':		/* error on physical device */
713	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
714	if (action == 'r')
715	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
716	break;
717    case 'l':		/* error on logical unit */
718    case 'm':		/* message about logical unit */
719	bus = MLY_LOGDEV_BUS(sc, me->lun);
720	target = MLY_LOGDEV_TARGET(me->lun);
721	mly_name_device(sc, bus, target);
722	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
723	if (action == 'r')
724	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
725	break;
727    case 's':		/* report of sense data */
728	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
729	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
730	     (ssd->add_sense_code == 0x04) &&
731	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
732	    break;	/* ignore NO_SENSE or NOT_READY in one case */
733
734	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
735	mly_printf(sc, "  sense key %d  asc %02x  ascq %02x\n",
736		      ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
737	mly_printf(sc, "  info %4D  csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
738	if (action == 'r')
739	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
740	break;
741    case 'e':
742	mly_printf(sc, tp, me->target, me->lun);
743	break;
744    case 'c':
745	mly_printf(sc, "controller %s\n", tp);
746	break;
747    case '?':
748	mly_printf(sc, "%s - %d\n", tp, me->code);
749	break;
750    default:	/* probably a 'noisy' event being ignored */
751	break;
752    }
753}
754
755/********************************************************************************
756 * Perform periodic activities.
757 */
758static void
759mly_periodic(void *data)
760{
761    struct mly_softc	*sc = (struct mly_softc *)data;
762    int			nchn, bus, target;
763
764    debug_called(2);
765
766    /*
767     * Scan devices.
768     */
769    nchn = sc->mly_controllerinfo->physical_channels_present +
770	sc->mly_controllerinfo->virtual_channels_present;
771    for (bus = 0; bus < nchn; bus++) {
772	for (target = 0; target < MLY_MAX_TARGETS; target++) {
773
774	    /* ignore the controller in this scan */
775	    if (target == sc->mly_controllerparam->initiator_id)
776		continue;
777
778	    /* perform device rescan? */
779	    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
780		mly_rescan_btl(sc, bus, target);
781	}
782    }
783
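    /* reschedule ourselves in one second */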
784    sc->mly_periodic = timeout(mly_periodic, sc, hz);
785}
786
787/********************************************************************************
788 ********************************************************************************
789                                                               Command Processing
790 ********************************************************************************
791 ********************************************************************************/
792
793/********************************************************************************
794 * Run a command and wait for it to complete.
795 *
796 */
797static int
798mly_immediate_command(struct mly_command *mc)
799{
800    struct mly_softc	*sc = mc->mc_sc;
801    int			error, s;
802
803    debug_called(2);
804
805    /* spinning at splcam is ugly, but we're only used during controller init */
806    s = splcam();
    if ((error = mly_start(mc))) {
	splx(s);
	return(error);
    }
809
810    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
811	/* sleep on the command */
812	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) {
813	    tsleep(mc, PRIBIO, "mlywait", 0);
814	}
815    } else {
816	/* spin and collect status while we do */
817	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE)
818	    mly_done(mc->mc_sc);
819    }
820    splx(s);
821    return(0);
822}
823
824/********************************************************************************
825 * Start as much queued I/O as possible on the controller
826 */
827void
828mly_startio(struct mly_softc *sc)
829{
830    struct mly_command	*mc;
831
832    debug_called(2);
833
834    for (;;) {
835
836	/* try for a ready command */
837	mc = mly_dequeue_ready(sc);
838
839	/* try to build a command from a queued ccb */
840	if (!mc)
841	    mly_cam_command(sc, &mc);
842
843	/* no command == nothing to do */
844	if (!mc)
845	    break;
846
847	/* try to post the command */
848	if (mly_start(mc)) {
849	    /* controller busy, or no resources - defer for later */
850	    mly_requeue_ready(mc);
851	    break;
852	}
853    }
854}
855
856/********************************************************************************
857 * Deliver a command to the controller; allocate controller resources at the
858 * last moment.
859 */
860static int
861mly_start(struct mly_command *mc)
862{
863    struct mly_softc		*sc = mc->mc_sc;
864    union mly_command_packet	*pkt;
865    int				s;
866
867    debug_called(2);
868
869    /*
870     * Set the command up for delivery to the controller.  This may fail
871     * due to resource shortages.
872     */
873    if (mly_get_slot(mc))
874	return(EBUSY);
875    mly_map_command(mc);
876
877    s = splcam();
878    /*
879     * Do we have to use the hardware mailbox?
880     */
881    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
882	/*
883	 * Check to see if the controller is ready for us.
884	 */
885	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
886	    splx(s);
887	    return(EBUSY);
888	}
889
890	/*
891	 * It's ready, send the command.
892	 */
893	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
894	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
895
896    } else {	/* use memory-mailbox mode */
897
898	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
899
900	/* check to see if the next slot is free yet */
901	if (pkt->mmbox.flag != 0) {
902	    splx(s);
903	    return(EBUSY);
904	}
905
906	/* copy in new command */
907	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
908	/* barrier to ensure completion of previous write before we write the flag */
909	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
910	/* copy flag last */
911	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
912	/* barrier to ensure completion of previous write before we notify the controller */
913	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */
914
915	/* signal controller, update index */
916	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
917	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
918    }
919
920    splx(s);
921    return(0);
922}
923
924/********************************************************************************
925 * Pick up command status from the controller, schedule a completion event
926 */
927void
928mly_done(struct mly_softc *sc)
929{
930    struct mly_command		*mc;
931    union mly_status_packet	*sp;
932    u_int16_t			slot;
933    int				s, worked;
934
935    s = splcam();
936    worked = 0;
937
938    /* pick up hardware-mailbox commands */
939    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
940	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
941	if (slot < MLY_SLOT_MAX) {
942	    mc = sc->mly_busycmds[slot];
943	    if (mc != NULL) {
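		/* the status byte, sense length and residual count follow the
		 * 16-bit slot number in the status mailbox */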
944		mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
945		mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
946		mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
947		mly_enqueue_completed(mc);
948		sc->mly_busycmds[slot] = NULL;
949		worked = 1;
950	    } else {
951		mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot);
952	    }
953	} else {
954	    /* slot 0xffff may mean "extremely bogus command" */
955	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
956	}
957	/* unconditionally acknowledge status */
958	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
959	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
960    }
961
962    /* pick up memory-mailbox commands */
963    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
964	for (;;) {
965	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
966
967	    /* check for more status */
968	    if (sp->mmbox.flag == 0)
969		break;
970
971	    /* get slot number */
972	    slot = sp->status.command_id;
973	    if (slot < MLY_SLOT_MAX) {
974		mc = sc->mly_busycmds[slot];
975		if (mc != NULL) {
976		    mc->mc_status = sp->status.status;
977		    mc->mc_sense = sp->status.sense_length;
978		    mc->mc_resid = sp->status.residue;
979		    mly_enqueue_completed(mc);
980		    sc->mly_busycmds[slot] = NULL;
981		    worked = 1;
982		} else {
983		    mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot);
984		}
985	    } else {
986		/* slot 0xffff may mean "extremely bogus command" */
987		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index);
988	    }
989
990	    /* clear and move to next slot */
991	    sp->mmbox.flag = 0;
992	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
993	}
994	/* acknowledge that we have collected status value(s) */
995	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
996    }
997
998    splx(s);
999    if (worked) {
1000#if __FreeBSD_version >= 500005
1001	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
1002	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
1003	else
1004#endif
1005	    mly_complete(sc, 0);
1006    }
1007}
1008
1009/********************************************************************************
1010 * Process completed commands
1011 */
1012static void
1013mly_complete(void *context, int pending)
1014{
1015    struct mly_softc	*sc = (struct mly_softc *)context;
1016    struct mly_command	*mc;
1017    void	        (* mc_complete)(struct mly_command *mc);
1018
1019
1021
1022    /*
1023     * Spin pulling commands off the completed queue and processing them.
1024     */
1025    while ((mc = mly_dequeue_completed(sc)) != NULL) {
1026
1027	/*
1028	 * Free controller resources, mark command complete.
1029	 *
1030	 * Note that as soon as we mark the command complete, it may be freed
1031	 * out from under us, so we need to save the mc_complete field in
1032	 * order to later avoid dereferencing mc.  (We would not expect to
1033	 * have a polling/sleeping consumer with mc_complete != NULL).
1034	 */
1035	mly_unmap_command(mc);
1036	mc_complete = mc->mc_complete;
1037	MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE);
1038
1039	/*
1040	 * Call completion handler or wake up sleeping consumer.
1041	 */
1042	if (mc_complete != NULL) {
1043	    mc_complete(mc);
1044	} else {
1045	    wakeup(mc);
1046	}
1047    }
1048
1049    /*
1050     * We may have freed up controller resources which would allow us
1051     * to push more commands onto the controller, so we check here.
1052     */
1053    mly_startio(sc);
1054
1055    /*
1056     * The controller may have updated the health status information,
1057     * so check for it here.
1058     *
1059     * Note that we only check for health status after a completed command.  It
1060     * might be wise to ping the controller occasionally if it's been idle for
1061     * a while just to check up on it.  While a filesystem is mounted, or I/O is
1062     * active this isn't really an issue.
1063     */
1064    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1065	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1066	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1067	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1068	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1069    }
1070    if (sc->mly_event_counter != sc->mly_event_waiting)
1071	mly_fetch_event(sc);
1072}
1073
1074/********************************************************************************
1075 ********************************************************************************
1076                                                        Command Buffer Management
1077 ********************************************************************************
1078 ********************************************************************************/
1079
1080/********************************************************************************
1081 * Give a command a slot in our lookup table, so that we can recover it when
1082 * the controller returns the slot number.
1083 *
1084 * Slots are freed in mly_done().
1085 */
1086static int
1087mly_get_slot(struct mly_command *mc)
1088{
1089    struct mly_softc	*sc = mc->mc_sc;
1090    u_int16_t		slot;
1091    int			tries;
1092
1093    debug_called(3);
1094
1095    if (mc->mc_flags & MLY_CMD_SLOTTED)
1096	return(0);
1097
1098    /*
1099     * Optimisation for the controller-busy case - check to see whether
1100     * we are already over the limit and stop immediately.
1101     */
1102    if (sc->mly_busy_count >= sc->mly_max_commands)
1103	return(EBUSY);
1104
1105    /*
1106     * Scan forward from the last slot that we assigned looking for a free
1107     * slot.  Don't scan more than the maximum number of commands that we
1108     * support (we should never reach the limit here due to the optimisation
1109     * above)
1110     */
1111    slot = sc->mly_last_slot;
1112    for (tries = sc->mly_max_commands; tries > 0; tries--) {
1113	if (sc->mly_busycmds[slot] == NULL) {
1114	    sc->mly_busycmds[slot] = mc;
1115	    mc->mc_slot = slot;
1116	    mc->mc_packet->generic.command_id = slot;
1117	    mc->mc_flags |= MLY_CMD_SLOTTED;
1118	    sc->mly_last_slot = slot;
1119	    return(0);
1120	}
1121	slot++;
1122	if (slot >= MLY_SLOT_MAX)
1123	    slot = MLY_SLOT_START;
1124    }
1125    return(EBUSY);
1126}
1127
1128/********************************************************************************
1129 * Allocate a command.
1130 */
1131int
1132mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
1133{
1134    struct mly_command	*mc;
1135
1136    debug_called(3);
1137
1138    if ((mc = mly_dequeue_free(sc)) == NULL) {
1139	mly_alloc_command_cluster(sc);
1140	mc = mly_dequeue_free(sc);
1141    }
1144
1145    if (mc == NULL)
1146	return(ENOMEM);
1147
1148    MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
1149    *mcp = mc;
1150    return(0);
1151}
1152
1153/********************************************************************************
1154 * Release a command back to the freelist.
1155 */
1156void
1157mly_release_command(struct mly_command *mc)
1158{
1159    debug_called(3);
1160
    /*
     * Reset fields that could confuse a consumer that doesn't set them
     * when this command is later reallocated.
     */
1165    MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
1166    mc->mc_data = NULL;
1167    mc->mc_flags = 0;
1168    mc->mc_complete = NULL;
1169    mc->mc_private = NULL;
1170
1171    /*
1172     * By default, we set up to overwrite the command packet with
1173     * sense information.
1174     */
1175    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
1176    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
1177
1178    mly_enqueue_free(mc);
1179}
1180
1181/********************************************************************************
1182 * Map helper for command cluster allocation.
1183 *
1184 * Note that there are never more command packets in a cluster than will fit in
1185 * a page, so there is no need to look at anything other than the base of the
1186 * allocation (which will be page-aligned).
1187 */
1188static void
1189mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1190{
1191    struct mly_command_cluster	*mcc = (struct mly_command_cluster *)arg;
1192
1193    debug_called(2);
1194
1195    mcc->mcc_packetphys = segs[0].ds_addr;
1196}
1197
1198/********************************************************************************
1199 * Allocate and initialise a cluster of commands.
1200 */
1201static void
1202mly_alloc_command_cluster(struct mly_softc *sc)
1203{
1204    struct mly_command_cluster	*mcc;
1205    struct mly_command		*mc;
1206    int				i;
1207
1208    debug_called(1);
1209
1210    mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT);
1211    if (mcc != NULL) {
1212
1213	/*
1214	 * Allocate enough space for all the command packets for this cluster and
1215	 * map them permanently into controller-visible space.
1216	 */
1217	if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet,
1218			     BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) {
1219	    free(mcc, M_DEVBUF);
1220	    return;
1221	}
1222	bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet,
1223			MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet),
1224			mly_alloc_command_cluster_map, mcc, 0);
1225
1226	mly_enqueue_cluster(sc, mcc);
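	/* initialise each command; those whose data map can be created are
	 * released to the freelist via mly_release_command() */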
1227	for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) {
1228	    mc = &mcc->mcc_command[i];
1229	    bzero(mc, sizeof(*mc));
1230	    mc->mc_sc = sc;
1231	    mc->mc_packet = mcc->mcc_packet + i;
1232	    mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet));
1233	    if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1234		mly_release_command(mc);
1235	}
1236    }
1237}
1238
1239/********************************************************************************
1240 * Command-mapping helper function - populate this command slot's s/g table
1241 * with the s/g entries for this command.
1242 */
1243static void
1244mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1245{
1246    struct mly_command		*mc = (struct mly_command *)arg;
1247    struct mly_softc		*sc = mc->mc_sc;
1248    struct mly_command_generic	*gen = &(mc->mc_packet->generic);
1249    struct mly_sg_entry		*sg;
1250    int				i, tabofs;
1251
1252    debug_called(3);
1253
1254    /* can we use the transfer structure directly? */
1255    if (nseg <= 2) {
1256	sg = &gen->transfer.direct.sg[0];
1257	gen->command_control.extended_sg_table = 0;
1258    } else {
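	/* too many segments for the two inline s/g entries; use this command
	 * slot's region of the shared s/g table instead */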
1259	tabofs = (mc->mc_slot * MLY_MAXSGENTRIES);
1260	sg = sc->mly_sg_table + tabofs;
1261	gen->transfer.indirect.entries[0] = nseg;
1262	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1263	gen->command_control.extended_sg_table = 1;
1264    }
1265
1266    /* copy the s/g table */
1267    for (i = 0; i < nseg; i++) {
1268	sg[i].physaddr = segs[i].ds_addr;
1269	sg[i].length = segs[i].ds_len;
1270    }
1271
1272}
1273
1274#if 0
1275/********************************************************************************
1276 * Command-mapping helper function - save the cdb's physical address.
1277 *
1278 * We don't support 'large' SCSI commands at this time, so this is unused.
1279 */
1280static void
1281mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1282{
1283    struct mly_command			*mc = (struct mly_command *)arg;
1284
1285    debug_called(3);
1286
1287    /* XXX can we safely assume that a CDB will never cross a page boundary? */
1288    if ((segs[0].ds_addr % PAGE_SIZE) >
1289	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1290	panic("cdb crosses page boundary");
1291
1292    /* fix up fields in the command packet */
1293    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
1294}
1295#endif
1296
1297/********************************************************************************
1298 * Map a command into controller-visible space
1299 */
1300static void
1301mly_map_command(struct mly_command *mc)
1302{
1303    struct mly_softc	*sc = mc->mc_sc;
1304
1305    debug_called(2);
1306
1307    /* don't map more than once */
1308    if (mc->mc_flags & MLY_CMD_MAPPED)
1309	return;
1310
1311    /* does the command have a data buffer? */
1312    if (mc->mc_data != NULL)
1313	bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1314			mly_map_command_sg, mc, 0);
1315
1316    if (mc->mc_flags & MLY_CMD_DATAIN)
1317	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1318    if (mc->mc_flags & MLY_CMD_DATAOUT)
1319	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1320
1321    mc->mc_flags |= MLY_CMD_MAPPED;
1322}
1323
1324/********************************************************************************
1325 * Unmap a command from controller-visible space
1326 */
1327static void
1328mly_unmap_command(struct mly_command *mc)
1329{
1330    struct mly_softc	*sc = mc->mc_sc;
1331
1332    debug_called(2);
1333
1334    if (!(mc->mc_flags & MLY_CMD_MAPPED))
1335	return;
1336
1337    if (mc->mc_flags & MLY_CMD_DATAIN)
1338	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1339    if (mc->mc_flags & MLY_CMD_DATAOUT)
1340	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1341
1342    /* does the command have a data buffer? */
1343    if (mc->mc_data != NULL)
1344	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1345
1346    mc->mc_flags &= ~MLY_CMD_MAPPED;
1347}
1348
1349/********************************************************************************
1350 ********************************************************************************
1351                                                                 Hardware Control
1352 ********************************************************************************
1353 ********************************************************************************/
1354
1355/********************************************************************************
1356 * Handshake with the firmware while the card is being initialised.
1357 */
1358static int
1359mly_fwhandshake(struct mly_softc *sc)
1360{
1361    u_int8_t	error, param0, param1;
1362    int		spinup = 0;
1363
1364    debug_called(1);
1365
1366    /* set HM_STSACK and let the firmware initialise */
1367    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
1368    DELAY(1000);	/* too short? */
1369
1370    /* if HM_STSACK is still true, the controller is initialising */
1371    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
1372	return(0);
1373    mly_printf(sc, "controller initialisation started\n");
1374
1375    /* spin waiting for initialisation to finish, or for a message to be delivered */
1376    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
1377	/* check for a message */
1378	if (MLY_ERROR_VALID(sc)) {
1379	    error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
1380	    param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
1381	    param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
1382
1383	    switch(error) {
1384	    case MLY_MSG_SPINUP:
1385		if (!spinup) {
1386		    mly_printf(sc, "drive spinup in progress\n");
1387		    spinup = 1;			/* only print this once (should print drive being spun?) */
1388		}
1389		break;
1390	    case MLY_MSG_RACE_RECOVERY_FAIL:
1391		mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
1392		break;
1393	    case MLY_MSG_RACE_IN_PROGRESS:
1394		mly_printf(sc, "mirror race recovery in progress\n");
1395		break;
1396	    case MLY_MSG_RACE_ON_CRITICAL:
1397		mly_printf(sc, "mirror race recovery on a critical drive\n");
1398		break;
1399	    case MLY_MSG_PARITY_ERROR:
1400		mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
1401		return(ENXIO);
1402	    default:
1403		mly_printf(sc, "unknown initialisation code 0x%x\n", error);
1404	    }
1405	}
1406    }
1407    return(0);
1408}
1409
1410/********************************************************************************
1411 ********************************************************************************
1412                                                        Debugging and Diagnostics
1413 ********************************************************************************
1414 ********************************************************************************/
1415
1416/********************************************************************************
1417 * Print some information about the controller.
1418 */
1419static void
1420mly_describe_controller(struct mly_softc *sc)
1421{
1422    struct mly_ioctl_getcontrollerinfo	*mi = sc->mly_controllerinfo;
1423
1424    mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
1425	       mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
1426	       mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,	/* XXX turn encoding? */
1427	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
1428	       mi->memory_size);
1429
1430    if (bootverbose) {
1431	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
1432		   mly_describe_code(mly_table_oemname, mi->oem_information),
1433		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
1434		   mi->interface_speed, mi->interface_width, mi->interface_name);
1435	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
1436		   mi->memory_size, mi->memory_speed, mi->memory_width,
1437		   mly_describe_code(mly_table_memorytype, mi->memory_type),
1438		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
1439		   mi->cache_size);
1440	mly_printf(sc, "CPU: %s @ %dMHZ\n",
1441		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
1442	if (mi->l2cache_size != 0)
1443	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
1444	if (mi->exmemory_size != 0)
1445	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
1446		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
1447		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
1448		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
1449	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
1450	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
1451		   mi->maximum_block_count, mi->maximum_sg_entries);
1452	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
1453		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
1454	mly_printf(sc, "physical devices present %d\n",
1455		   mi->physical_devices_present);
1456	mly_printf(sc, "physical disks present/offline %d/%d\n",
1457		   mi->physical_disks_present, mi->physical_disks_offline);
1458	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
1459		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
1460		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
1461		   mi->virtual_channels_possible);
1462	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
1463	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
1464		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
1465    }
1466}
1467
1468#ifdef MLY_DEBUG
1469/********************************************************************************
1470 * Print some controller state
1471 */
1472static void
1473mly_printstate(struct mly_softc *sc)
1474{
1475    mly_printf(sc, "IDBR %02x  ODBR %02x  ERROR %02x  (%x %x %x)\n",
1476		  MLY_GET_REG(sc, sc->mly_idbr),
1477		  MLY_GET_REG(sc, sc->mly_odbr),
1478		  MLY_GET_REG(sc, sc->mly_error_status),
1479		  sc->mly_idbr,
1480		  sc->mly_odbr,
1481		  sc->mly_error_status);
1482    mly_printf(sc, "IMASK %02x  ISTATUS %02x\n",
1483		  MLY_GET_REG(sc, sc->mly_interrupt_mask),
1484		  MLY_GET_REG(sc, sc->mly_interrupt_status));
1485    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
1486		  MLY_GET_REG(sc, sc->mly_command_mailbox),
1487		  MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
1488		  MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
1489		  MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
1490		  MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
1491		  MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
1492		  MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
1493		  MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
1494    mly_printf(sc, "STATUS  %02x %02x %02x %02x %02x %02x %02x %02x\n",
1495		  MLY_GET_REG(sc, sc->mly_status_mailbox),
1496		  MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
1497		  MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
1498		  MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
1499		  MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
1500		  MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
1501		  MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
1502		  MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
1503    mly_printf(sc, "        %04x        %08x\n",
1504		  MLY_GET_REG2(sc, sc->mly_status_mailbox),
1505		  MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
1506}
1507
1508struct mly_softc	*mly_softc0 = NULL;
1509void
1510mly_printstate0(void)
1511{
1512    if (mly_softc0 != NULL)
1513	mly_printstate(mly_softc0);
1514}
1515
1516/********************************************************************************
1517 * Print a command
1518 */
1519static void
1520mly_print_command(struct mly_command *mc)
1521{
1522    struct mly_softc	*sc = mc->mc_sc;
1523
1524    mly_printf(sc, "COMMAND @ %p\n", mc);
1525    mly_printf(sc, "  slot      %d\n", mc->mc_slot);
1526    mly_printf(sc, "  state     %d\n", MLY_CMD_STATE(mc));
1527    mly_printf(sc, "  status    0x%x\n", mc->mc_status);
1528    mly_printf(sc, "  sense len %d\n", mc->mc_sense);
1529    mly_printf(sc, "  resid     %d\n", mc->mc_resid);
1530    mly_printf(sc, "  packet    %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
1531    if (mc->mc_packet != NULL)
1532	mly_print_packet(mc);
1533    mly_printf(sc, "  data      %p/%d\n", mc->mc_data, mc->mc_length);
1534    mly_printf(sc, "  flags     %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n");
1535    mly_printf(sc, "  complete  %p\n", mc->mc_complete);
1536    mly_printf(sc, "  private   %p\n", mc->mc_private);
1537}
1538
1539/********************************************************************************
1540 * Print a command packet
1541 */
1542static void
1543mly_print_packet(struct mly_command *mc)
1544{
1545    struct mly_softc			*sc = mc->mc_sc;
1546    struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
1547    struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
1548    struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
1549    struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
1550    int					transfer;
1551
1552    mly_printf(sc, "   command_id           %d\n", ge->command_id);
1553    mly_printf(sc, "   opcode               %d\n", ge->opcode);
1554    mly_printf(sc, "   command_control      fua %d  dpo %d  est %d  dd %s  nas %d ddis %d\n",
1555		  ge->command_control.force_unit_access,
1556		  ge->command_control.disable_page_out,
1557		  ge->command_control.extended_sg_table,
1558		  (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
1559		  ge->command_control.no_auto_sense,
1560		  ge->command_control.disable_disconnect);
1561    mly_printf(sc, "   data_size            %d\n", ge->data_size);
1562    mly_printf(sc, "   sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
1563    mly_printf(sc, "   lun                  %d\n", ge->addr.phys.lun);
1564    mly_printf(sc, "   target               %d\n", ge->addr.phys.target);
1565    mly_printf(sc, "   channel              %d\n", ge->addr.phys.channel);
1566    mly_printf(sc, "   logical device       %d\n", ge->addr.log.logdev);
1567    mly_printf(sc, "   controller           %d\n", ge->addr.phys.controller);
1568    mly_printf(sc, "   timeout              %d %s\n",
1569		  ge->timeout.value,
1570		  (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
1571		  ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
1572    mly_printf(sc, "   maximum_sense_size   %d\n", ge->maximum_sense_size);
1573    switch(ge->opcode) {
1574    case MDACMD_SCSIPT:
1575    case MDACMD_SCSI:
1576	mly_printf(sc, "   cdb length           %d\n", ss->cdb_length);
1577	mly_printf(sc, "   cdb                  %*D\n", ss->cdb_length, ss->cdb, " ");
1578	transfer = 1;
1579	break;
1580    case MDACMD_SCSILC:
1581    case MDACMD_SCSILCPT:
1582	mly_printf(sc, "   cdb length           %d\n", sl->cdb_length);
1583	mly_printf(sc, "   cdb                  0x%llx\n", sl->cdb_physaddr);
1584	transfer = 1;
1585	break;
1586    case MDACMD_IOCTL:
1587	mly_printf(sc, "   sub_ioctl            0x%x\n", io->sub_ioctl);
1588	switch(io->sub_ioctl) {
1589	case MDACIOCTL_SETMEMORYMAILBOX:
1590	    mly_printf(sc, "   health_buffer_size   %d\n",
1591			  io->param.setmemorymailbox.health_buffer_size);
1592	    mly_printf(sc, "   health_buffer_phys   0x%llx\n",
1593			  io->param.setmemorymailbox.health_buffer_physaddr);
1594	    mly_printf(sc, "   command_mailbox      0x%llx\n",
1595			  io->param.setmemorymailbox.command_mailbox_physaddr);
1596	    mly_printf(sc, "   status_mailbox       0x%llx\n",
1597			  io->param.setmemorymailbox.status_mailbox_physaddr);
1598	    transfer = 0;
1599	    break;
1600
1601	case MDACIOCTL_SETREALTIMECLOCK:
1602	case MDACIOCTL_GETHEALTHSTATUS:
1603	case MDACIOCTL_GETCONTROLLERINFO:
1604	case MDACIOCTL_GETLOGDEVINFOVALID:
1605	case MDACIOCTL_GETPHYSDEVINFOVALID:
1606	case MDACIOCTL_GETPHYSDEVSTATISTICS:
1607	case MDACIOCTL_GETLOGDEVSTATISTICS:
1608	case MDACIOCTL_GETCONTROLLERSTATISTICS:
1609	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
1610	case MDACIOCTL_CREATENEWCONF:
1611	case MDACIOCTL_ADDNEWCONF:
1612	case MDACIOCTL_GETDEVCONFINFO:
1613	case MDACIOCTL_GETFREESPACELIST:
1614	case MDACIOCTL_MORE:
1615	case MDACIOCTL_SETPHYSDEVPARAMETER:
1616	case MDACIOCTL_GETPHYSDEVPARAMETER:
1617	case MDACIOCTL_GETLOGDEVPARAMETER:
1618	case MDACIOCTL_SETLOGDEVPARAMETER:
1619	    mly_printf(sc, "   param                %10D\n", io->param.data.param, " ");
1620	    transfer = 1;
1621	    break;
1622
1623	case MDACIOCTL_GETEVENT:
1624	    mly_printf(sc, "   event                %d\n",
1625		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
1626	    transfer = 1;
1627	    break;
1628
1629	case MDACIOCTL_SETRAIDDEVSTATE:
1630	    mly_printf(sc, "   state                %d\n", io->param.setraiddevstate.state);
1631	    transfer = 0;
1632	    break;
1633
1634	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
1635	    mly_printf(sc, "   raid_device          %d\n", io->param.xlatephysdevtoraiddev.raid_device);
1636	    mly_printf(sc, "   controller           %d\n", io->param.xlatephysdevtoraiddev.controller);
1637	    mly_printf(sc, "   channel              %d\n", io->param.xlatephysdevtoraiddev.channel);
1638	    mly_printf(sc, "   target               %d\n", io->param.xlatephysdevtoraiddev.target);
1639	    mly_printf(sc, "   lun                  %d\n", io->param.xlatephysdevtoraiddev.lun);
1640	    transfer = 0;
1641	    break;
1642
1643	case MDACIOCTL_GETGROUPCONFINFO:
1644	    mly_printf(sc, "   group                %d\n", io->param.getgroupconfinfo.group);
1645	    transfer = 1;
1646	    break;
1647
1648	case MDACIOCTL_GET_SUBSYSTEM_DATA:
1649	case MDACIOCTL_SET_SUBSYSTEM_DATA:
1650	case MDACIOCTL_STARTDISOCVERY:
1651	case MDACIOCTL_INITPHYSDEVSTART:
1652	case MDACIOCTL_INITPHYSDEVSTOP:
1653	case MDACIOCTL_INITRAIDDEVSTART:
1654	case MDACIOCTL_INITRAIDDEVSTOP:
1655	case MDACIOCTL_REBUILDRAIDDEVSTART:
1656	case MDACIOCTL_REBUILDRAIDDEVSTOP:
1657	case MDACIOCTL_MAKECONSISTENTDATASTART:
1658	case MDACIOCTL_MAKECONSISTENTDATASTOP:
1659	case MDACIOCTL_CONSISTENCYCHECKSTART:
1660	case MDACIOCTL_CONSISTENCYCHECKSTOP:
1661	case MDACIOCTL_RESETDEVICE:
1662	case MDACIOCTL_FLUSHDEVICEDATA:
1663	case MDACIOCTL_PAUSEDEVICE:
1664	case MDACIOCTL_UNPAUSEDEVICE:
1665	case MDACIOCTL_LOCATEDEVICE:
1666	case MDACIOCTL_SETMASTERSLAVEMODE:
1667	case MDACIOCTL_DELETERAIDDEV:
1668	case MDACIOCTL_REPLACEINTERNALDEV:
1669	case MDACIOCTL_CLEARCONF:
1670	case MDACIOCTL_GETCONTROLLERPARAMETER:
1671	case MDACIOCTL_SETCONTRLLERPARAMETER:
1672	case MDACIOCTL_CLEARCONFSUSPMODE:
1673	case MDACIOCTL_STOREIMAGE:
1674	case MDACIOCTL_READIMAGE:
1675	case MDACIOCTL_FLASHIMAGES:
1676	case MDACIOCTL_RENAMERAIDDEV:
1677	default:			/* no idea what to print */
1678	    transfer = 0;
1679	    break;
1680	}
1681	break;
1682
1683    case MDACMD_IOCTLCHECK:
1684    case MDACMD_MEMCOPY:
1685    default:
1686	transfer = 0;
1687	break;	/* print nothing */
1688    }
1689    if (transfer) {
1690	if (ge->command_control.extended_sg_table) {
1691	    mly_printf(sc, "   sg table             0x%llx/%d\n",
1692			  ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
1693	} else {
1694	    mly_printf(sc, "   0000                 0x%llx/%lld\n",
1695			  ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
1696	    mly_printf(sc, "   0001                 0x%llx/%lld\n",
1697			  ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
1698	}
1699    }
1700}
1701
1702/********************************************************************************
1703 * Panic in a slightly informative fashion
1704 */
1705static void
1706mly_panic(struct mly_softc *sc, char *reason)
1707{
1708    mly_printstate(sc);
1709    panic(reason);
1710}
1711#endif
1712