1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *	$FreeBSD: head/sys/dev/mly/mly.c 67164 2000-10-15 14:19:01Z phk $
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/ctype.h>
37
38#include <machine/bus_memio.h>
39#include <machine/bus.h>
40#include <machine/resource.h>
41#include <sys/rman.h>
42
43#include <cam/scsi/scsi_all.h>
44
45#include <dev/mly/mlyreg.h>
46#include <dev/mly/mlyvar.h>
47#define MLY_DEFINE_TABLES
48#include <dev/mly/mly_tables.h>
49
50static int	mly_get_controllerinfo(struct mly_softc *sc);
51static void	mly_scan_devices(struct mly_softc *sc);
52static void	mly_rescan_btl(struct mly_softc *sc, int bus, int target);
53static void	mly_complete_rescan(struct mly_command *mc);
54static int	mly_get_eventstatus(struct mly_softc *sc);
55static int	mly_enable_mmbox(struct mly_softc *sc);
56static int	mly_flush(struct mly_softc *sc);
57static int	mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
58			  size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
59static void	mly_fetch_event(struct mly_softc *sc);
60static void	mly_complete_event(struct mly_command *mc);
61static void	mly_process_event(struct mly_softc *sc, struct mly_event *me);
62static void	mly_periodic(void *data);
63
64static int	mly_immediate_command(struct mly_command *mc);
65static int	mly_start(struct mly_command *mc);
66static void	mly_complete(void *context, int pending);
67
68static int	mly_get_slot(struct mly_command *mc);
69static void	mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
70static void	mly_alloc_command_cluster(struct mly_softc *sc);
71static void	mly_map_command(struct mly_command *mc);
72static void	mly_unmap_command(struct mly_command *mc);
73
74static int	mly_fwhandshake(struct mly_softc *sc);
75
76static void	mly_describe_controller(struct mly_softc *sc);
77#ifdef MLY_DEBUG
78static void	mly_printstate(struct mly_softc *sc);
79static void	mly_print_command(struct mly_command *mc);
80static void	mly_print_packet(struct mly_command *mc);
81static void	mly_panic(struct mly_softc *sc, char *reason);
82#endif
83
84/********************************************************************************
85 ********************************************************************************
86                                                                 Device Interface
87 ********************************************************************************
88 ********************************************************************************/
89
90/********************************************************************************
91 * Initialise the controller and softc
92 */
93int
94mly_attach(struct mly_softc *sc)
95{
96    int		error;
97
98    debug_called(1);
99
100    /*
101     * Initialise per-controller queues.
102     */
103    TAILQ_INIT(&sc->mly_freecmds);
104    TAILQ_INIT(&sc->mly_ready);
105    TAILQ_INIT(&sc->mly_completed);
106    TAILQ_INIT(&sc->mly_clusters);
107
108#if __FreeBSD_version >= 500005
109    /*
110     * Initialise command-completion task.
111     */
112    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
113#endif
114
115    /* disable interrupts before we start talking to the controller */
116    MLY_MASK_INTERRUPTS(sc);
117
118    /*
119     * Wait for the controller to come ready, handshake with the firmware if required.
120     * This is typically only necessary on platforms where the controller BIOS does not
121     * run.
122     */
123    if ((error = mly_fwhandshake(sc)))
124	return(error);
125
126    /*
127     * Initialise the slot allocator so that we can issue commands.
128     */
129    sc->mly_max_commands = MLY_SLOT_MAX;
130    sc->mly_last_slot = MLY_SLOT_START;
131
132    /*
133     * Obtain controller feature information
134     */
135    if ((error = mly_get_controllerinfo(sc)))
136	return(error);
137
138    /*
139     * Update the slot allocator limit based on the controller inquiry.
140     */
141    sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX);
142
143    /*
144     * Get the current event counter for health purposes, populate the initial
145     * health status buffer.
146     */
147    if ((error = mly_get_eventstatus(sc)))
148	return(error);
149
150    /*
151     * Enable memory-mailbox mode
152     */
153    if ((error = mly_enable_mmbox(sc)))
154	return(error);
155
156    /*
157     * Attach to CAM.
158     */
159    if ((error = mly_cam_attach(sc)))
160	return(error);
161
162    /*
163     * Print a little information about the controller
164     */
165    mly_describe_controller(sc);
166
167    /*
168     * Mark all attached devices for rescan
169     */
170    mly_scan_devices(sc);
171
172    /*
173     * Instigate the first status poll immediately.  Rescan completions won't
174     * happen until interrupts are enabled, which should still be before
175     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
176     * discovery here...)
177     */
178    mly_periodic((void *)sc);
179
180    /* enable interrupts now */
181    MLY_UNMASK_INTERRUPTS(sc);
182
183    return(0);
184}
185
186/********************************************************************************
187 * Bring the controller to a state where it can be safely left alone.
188 */
189void
190mly_detach(struct mly_softc *sc)
191{
192
193    debug_called(1);
194
195    /* kill the periodic event */
196    untimeout(mly_periodic, sc, sc->mly_periodic);
197
198    sc->mly_state |= MLY_STATE_SUSPEND;
199
200    /* flush controller */
201    mly_printf(sc, "flushing cache...");
202    printf("%s\n", mly_flush(sc) ? "failed" : "done");
203
204    MLY_MASK_INTERRUPTS(sc);
205}
206
207/********************************************************************************
208 ********************************************************************************
209                                                                 Command Wrappers
210 ********************************************************************************
211 ********************************************************************************/
212
213/********************************************************************************
214 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
215 */
216static int
217mly_get_controllerinfo(struct mly_softc *sc)
218{
219    struct mly_command_ioctl	mci;
220    u_int8_t			status;
221    int				error;
222
223    debug_called(1);
224
225    if (sc->mly_controllerinfo != NULL)
226	free(sc->mly_controllerinfo, M_DEVBUF);
227
228    /* build the getcontrollerinfo ioctl and send it */
229    bzero(&mci, sizeof(mci));
230    sc->mly_controllerinfo = NULL;
231    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
232    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
233			   &status, NULL, NULL)))
234	return(error);
235    if (status != 0)
236	return(EIO);
237
238    if (sc->mly_controllerparam != NULL)
239	free(sc->mly_controllerparam, M_DEVBUF);
240
241    /* build the getcontrollerparameter ioctl and send it */
242    bzero(&mci, sizeof(mci));
243    sc->mly_controllerparam = NULL;
244    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
245    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
246			   &status, NULL, NULL)))
247	return(error);
248    if (status != 0)
249	return(EIO);
250
251    return(0);
252}
253
254/********************************************************************************
255 * Schedule all possible devices for a rescan.
256 *
257 */
258static void
259mly_scan_devices(struct mly_softc *sc)
260{
261    int		bus, target, nchn;
262
263    debug_called(1);
264
265    /*
266     * Clear any previous BTL information.
267     */
268    bzero(&sc->mly_btl, sizeof(sc->mly_btl));
269
270    /*
271     * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
272     */
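    /*
     * Bus numbers cover the physical channels first and then the virtual
     * (logical device) channels; mly_rescan_btl() relies on this ordering
     * to tell the two apart.
     */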
273    nchn = sc->mly_controllerinfo->physical_channels_present +
274	sc->mly_controllerinfo->virtual_channels_present;
275    for (bus = 0; bus < nchn; bus++)
276	for (target = 0; target < MLY_MAX_TARGETS; target++)
277	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
278
279}
280
281/********************************************************************************
282 * Rescan a device, possibly as a consequence of getting an event which suggests
283 * that it may have changed.
284 */
285static void
286mly_rescan_btl(struct mly_softc *sc, int bus, int target)
287{
288    struct mly_command		*mc;
289    struct mly_command_ioctl	*mci;
290
291    debug_called(2);
292
293    /* get a command */
294    mc = NULL;
295    if (mly_alloc_command(sc, &mc))
296	return;				/* we'll be retried soon */
297
298    /* set up the data buffer */
299    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT)) == NULL) {
300	mly_release_command(mc);
301	return;				/* we'll get retried the next time a command completes */
302    }
303    bzero(mc->mc_data, sizeof(union mly_devinfo));
304    mc->mc_flags |= MLY_CMD_DATAIN;
305    mc->mc_complete = mly_complete_rescan;
306
307    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
308
309    /*
310     * Build the ioctl.
311     *
312     * At this point we are committed to sending this request, as it
313     * will be the only one constructed for this particular update.
314     */
315    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
316    mci->opcode = MDACMD_IOCTL;
317    mci->addr.phys.controller = 0;
318    mci->timeout.value = 30;
319    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
320    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
321	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
322	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
323	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
324	    + target;
325	debug(2, "logical device %d", mci->addr.log.logdev);
326    } else {
327	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
328	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
329	mci->addr.phys.lun = 0;
330	mci->addr.phys.target = target;
331	mci->addr.phys.channel = bus;
332	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
333    }
334
335    /*
336     * Use the ready queue to get this command dispatched.
337     */
338    mly_enqueue_ready(mc);
339    mly_startio(sc);
340}
341
342/********************************************************************************
343 * Handle the completion of a rescan operation
344 */
345static void
346mly_complete_rescan(struct mly_command *mc)
347{
348    struct mly_softc				*sc = mc->mc_sc;
349    struct mly_ioctl_getlogdevinfovalid		*ldi;
350    struct mly_ioctl_getphysdevinfovalid	*pdi;
351    int						bus, target;
352
353    debug_called(2);
354
355    /* iff the command completed OK, we should use the result to update our data */
356    if (mc->mc_status == 0) {
357	if (mc->mc_length == sizeof(*ldi)) {
358	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
359	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
360	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
361	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
362	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
363	    sc->mly_btl[bus][target].mb_state = ldi->state;
364	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
365		  mly_describe_code(mly_table_device_type, ldi->raid_level),
366		  mly_describe_code(mly_table_device_state, ldi->state));
367	} else if (mc->mc_length == sizeof(*pdi)) {
368	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
369	    bus = pdi->channel;
370	    target = pdi->target;
371	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
372	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
373	    sc->mly_btl[bus][target].mb_state = pdi->state;
374	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
375		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
376	    debug(2, "BTL rescan for %d:%d returns %s", bus, target,
377		  mly_describe_code(mly_table_device_state, pdi->state));
378	} else {
379	    mly_printf(sc, "BTL rescan result corrupted\n");
380	}
381    } else {
382	/*
383	 * A request sent for a device beyond the last device present will fail.
384	 * We don't care about this, so we do nothing about it.
385	 */
386    }
387    free(mc->mc_data, M_DEVBUF);
388    mly_release_command(mc);
389}
390
391/********************************************************************************
392 * Get the current health status and set the 'next event' counter to suit.
393 */
394static int
395mly_get_eventstatus(struct mly_softc *sc)
396{
397    struct mly_command_ioctl	mci;
398    struct mly_health_status	*mh;
399    u_int8_t			status;
400    int				error;
401
402    /* build the gethealthstatus ioctl and send it */
403    bzero(&mci, sizeof(mci));
404    mh = NULL;
405    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
406
407    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
408	return(error);
409    if (status != 0)
410	return(EIO);
411
412    /* get the event counter */
413    sc->mly_event_change = mh->change_counter;
414    sc->mly_event_waiting = mh->next_event;
415    sc->mly_event_counter = mh->next_event;
416
417    /* save the health status into the memory mailbox */
418    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
419
420    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
421
422    free(mh, M_DEVBUF);
423    return(0);
424}
425
426/********************************************************************************
427 * Enable the memory mailbox mode.
428 */
429static int
430mly_enable_mmbox(struct mly_softc *sc)
431{
432    struct mly_command_ioctl	mci;
433    u_int8_t			*sp, status;
434    int				error;
435
436    debug_called(1);
437
438    /* build the ioctl and send it */
439    bzero(&mci, sizeof(mci));
440    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
441    /* set buffer addresses */
442    mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_command);
443    mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_status);
444    mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + fldoff(mly_mmbox, mmm_health);
445
446    /* set buffer sizes - abuse of data_size field is revolting */
447    sp = (u_int8_t *)&mci.data_size;
448    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
449    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
450    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
451
452    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d)", sc->mly_mmbox,
453	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
454	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
455	  mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size);
456
457    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
458	return(error);
459    if (status != 0)
460	return(EIO);
461    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
462    debug(1, "memory mailbox active");
463    return(0);
464}
465
466/********************************************************************************
467 * Flush all pending I/O from the controller.
468 */
469static int
470mly_flush(struct mly_softc *sc)
471{
472    struct mly_command_ioctl	mci;
473    u_int8_t			status;
474    int				error;
475
476    debug_called(1);
477
478    /* build the ioctl */
479    bzero(&mci, sizeof(mci));
480    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
481    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
482
483    /* pass it off to the controller */
484    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
485	return(error);
486
487    return((status == 0) ? 0 : EIO);
488}
489
490/********************************************************************************
491 * Perform an ioctl command.
492 *
493 * If (data) is not NULL, the command requires data transfer.  If (*data) is NULL
494 * the command requires data transfer from the controller, and we will allocate
495 * a buffer for it.  If (*data) is not NULL, the command requires data transfer
496 * to the controller.
497 *
498 * XXX passing in the whole ioctl structure is ugly.  Better ideas?
499 *
500 * XXX we don't even try to handle the case where datasize > 4k.  We should.
501 */
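/*
 * Illustrative usage (modelled on mly_get_controllerinfo() above): pass a
 * pointer to a NULL data pointer and let this routine allocate the DATAIN
 * buffer, e.g.
 *
 *	struct mly_command_ioctl	mci;
 *	void				*info = NULL;
 *	u_int8_t			status;
 *
 *	bzero(&mci, sizeof(mci));
 *	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
 *	error = mly_ioctl(sc, &mci, &info, sizeof(struct mly_ioctl_getcontrollerinfo),
 *			  &status, NULL, NULL);
 *
 * On success the caller owns the returned buffer and must free(info, M_DEVBUF)
 * when finished with it.
 */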
502static int
503mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
504	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
505{
506    struct mly_command		*mc;
507    struct mly_command_ioctl	*mci;
508    int				error;
509
510    debug_called(1);
511
512    mc = NULL;
513    if (mly_alloc_command(sc, &mc)) {
514	error = ENOMEM;
515	goto out;
516    }
517
518    /* copy the ioctl structure, but save some important fields and then fixup */
519    mci = &mc->mc_packet->ioctl;
520    ioctl->sense_buffer_address = mci->sense_buffer_address;
521    ioctl->maximum_sense_size = mci->maximum_sense_size;
522    *mci = *ioctl;
523    mci->opcode = MDACMD_IOCTL;
524    mci->timeout.value = 30;
525    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
526
527    /* handle the data buffer */
528    if (data != NULL) {
529	if (*data == NULL) {
530	    /* allocate data buffer */
531	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
532		error = ENOMEM;
533		goto out;
534	    }
535	    mc->mc_flags |= MLY_CMD_DATAIN;
536	} else {
537	    mc->mc_data = *data;
538	    mc->mc_flags |= MLY_CMD_DATAOUT;
539	}
540	mc->mc_length = datasize;
541	mc->mc_packet->generic.data_size = datasize;
542    }
543
544    /* run the command */
545    if ((error = mly_immediate_command(mc)))
546	goto out;
547
548    /* clean up and return any data */
549    *status = mc->mc_status;
550    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
551	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
552	*sense_length = mc->mc_sense;
553	goto out;
554    }
555
556    /* should we return a data pointer? */
557    if ((data != NULL) && (*data == NULL))
558	*data = mc->mc_data;
559
560    /* command completed OK */
561    error = 0;
562
563out:
564    if (mc != NULL) {
565	/* do we need to free a data buffer we allocated? */
566	if (error && (mc->mc_data != NULL) && (*data == NULL))
567	    free(mc->mc_data, M_DEVBUF);
568	mly_release_command(mc);
569    }
570    return(error);
571}
572
573/********************************************************************************
574 * Fetch one event from the controller.
575 */
576static void
577mly_fetch_event(struct mly_softc *sc)
578{
579    struct mly_command		*mc;
580    struct mly_command_ioctl	*mci;
581    int				s;
582    u_int32_t			event;
583
584    debug_called(2);
585
586    /* get a command */
587    mc = NULL;
588    if (mly_alloc_command(sc, &mc))
589	return;				/* we'll get retried the next time a command completes */
590
591    /* set up the data buffer */
592    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT)) == NULL) {
593	mly_release_command(mc);
594	return;				/* we'll get retried the next time a command completes */
595    }
596    bzero(mc->mc_data, sizeof(struct mly_event));
597    mc->mc_length = sizeof(struct mly_event);
598    mc->mc_flags |= MLY_CMD_DATAIN;
599    mc->mc_complete = mly_complete_event;
600
601    /*
602     * Get an event number to fetch.  It's possible that we've raced with another
603     * context for the last event, in which case there will be no more events.
604     */
605    s = splcam();
606    if (sc->mly_event_counter == sc->mly_event_waiting) {
607	mly_release_command(mc);
608	splx(s);
609	return;
610    }
611    event = sc->mly_event_counter++;
612    splx(s);
613
614    /*
615     * Build the ioctl.
616     *
617     * At this point we are committed to sending this request, as it
618     * will be the only one constructed for this particular event number.
619     */
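    /*
     * The 32-bit event sequence number is scattered across the packet: the
     * low 16 bits go in param.getevent.sequence_number_low, bits 16-23 in
     * addr.phys.lun and bits 24-31 in addr.phys.target (see below).
     */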
620    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
621    mci->opcode = MDACMD_IOCTL;
622    mci->data_size = sizeof(struct mly_event);
623    mci->addr.phys.lun = (event >> 16) & 0xff;
624    mci->addr.phys.target = (event >> 24) & 0xff;
625    mci->addr.phys.channel = 0;
626    mci->addr.phys.controller = 0;
627    mci->timeout.value = 30;
628    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
629    mci->sub_ioctl = MDACIOCTL_GETEVENT;
630    mci->param.getevent.sequence_number_low = event & 0xffff;
631
632    debug(2, "fetch event %u", event);
633
634    /*
635     * Use the ready queue to get this command dispatched.
636     */
637    mly_enqueue_ready(mc);
638    mly_startio(sc);
639}
640
641/********************************************************************************
642 * Handle the completion of an event poll.
643 *
644 * Note that we don't actually have to instigate another poll; the completion of
645 * this command will trigger that if there are any more events to poll for.
646 */
647static void
648mly_complete_event(struct mly_command *mc)
649{
650    struct mly_softc	*sc = mc->mc_sc;
651    struct mly_event	*me = (struct mly_event *)mc->mc_data;
652
653    debug_called(2);
654
655    /*
656     * If the event was successfully fetched, process it.
657     */
658    if (mc->mc_status == SCSI_STATUS_OK) {
659	mly_process_event(sc, me);
660	free(me, M_DEVBUF);
661    }
662    mly_release_command(mc);
663}
664
665/********************************************************************************
666 * Process a controller event.
667 */
668static void
669mly_process_event(struct mly_softc *sc, struct mly_event *me)
670{
671    struct scsi_sense_data	*ssd = (struct scsi_sense_data *)&me->sense[0];
672    char			*fp, *tp;
673    int				bus, target, event, class, action;
674
675    /*
676     * Errors can be reported using vendor-unique sense data.  In this case, the
677     * event code will be 0x1c (Request sense data present), the sense key will
678     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
679     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
680     * and low seven bits of the ASC (low seven bits of the high byte).
681     */
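    /*
     * For example (hypothetical values): ASC 0x81 with ASCQ 0x02 decodes to
     * event ((0x81 & ~0x80) << 8) + 0x02 = 0x0102.
     */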
682    if ((me->code == 0x1c) &&
683	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
684	(ssd->add_sense_code & 0x80)) {
685	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
686    } else {
687	event = me->code;
688    }
689
690    /* look up event, get codes */
691    fp = mly_describe_code(mly_table_event, event);
692
693    debug(2, "Event %d  code 0x%x", me->sequence_number, me->code);
694
695    /* quiet event? */
696    class = fp[0];
697    if (isupper(class) && bootverbose)
698	class = tolower(class);
699
700    /* get action code, text string */
701    action = fp[1];
702    tp = &fp[2];
703
704    /*
705     * Print some information about the event.
706     *
707     * This code uses a table derived from the corresponding portion of the Linux
708     * driver, and thus the parser is very similar.
709     */
710    switch(class) {
711    case 'p':		/* error on physical device */
712	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
713	if (action == 'r')
714	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
715	break;
716    case 'l':		/* error on logical unit */
717    case 'm':		/* message about logical unit */
718	bus = MLY_LOGDEV_BUS(sc, me->lun);
719	target = MLY_LOGDEV_TARGET(me->lun);
720	mly_name_device(sc, bus, target);
721	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
722	if (action == 'r')
723	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
724	break;
726    case 's':		/* report of sense data */
727	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
728	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
729	     (ssd->add_sense_code == 0x04) &&
730	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
731	    break;	/* ignore NO_SENSE or NOT_READY in one case */
732
733	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
734	mly_printf(sc, "  sense key %d  asc %02x  ascq %02x\n",
735		      ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
736	mly_printf(sc, "  info %4D  csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
737	if (action == 'r')
738	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
739	break;
740    case 'e':
741	mly_printf(sc, tp, me->target, me->lun);
742	break;
743    case 'c':
744	mly_printf(sc, "controller %s\n", tp);
745	break;
746    case '?':
747	mly_printf(sc, "%s - %d\n", tp, me->code);
748	break;
749    default:	/* probably a 'noisy' event being ignored */
750	break;
751    }
752}
753
754/********************************************************************************
755 * Perform periodic activities.
756 */
757static void
758mly_periodic(void *data)
759{
760    struct mly_softc	*sc = (struct mly_softc *)data;
761    int			nchn, bus, target;
762
763    debug_called(2);
764
765    /*
766     * Scan devices.
767     */
768    nchn = sc->mly_controllerinfo->physical_channels_present +
769	sc->mly_controllerinfo->virtual_channels_present;
770    for (bus = 0; bus < nchn; bus++) {
771	for (target = 0; target < MLY_MAX_TARGETS; target++) {
772
773	    /* ignore the controller in this scan */
774	    if (target == sc->mly_controllerparam->initiator_id)
775		continue;
776
777	    /* perform device rescan? */
778	    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
779		mly_rescan_btl(sc, bus, target);
780	}
781    }
782
783    sc->mly_periodic = timeout(mly_periodic, sc, hz);
784}
785
786/********************************************************************************
787 ********************************************************************************
788                                                               Command Processing
789 ********************************************************************************
790 ********************************************************************************/
791
792/********************************************************************************
793 * Run a command and wait for it to complete.
794 *
795 */
796static int
797mly_immediate_command(struct mly_command *mc)
798{
799    struct mly_softc	*sc = mc->mc_sc;
800    int			error, s;
801
802    debug_called(2);
803
804    /* spinning at splcam is ugly, but we're only used during controller init */
805    s = splcam();
806    if ((error = mly_start(mc))) {
	splx(s);
807	return(error);
    }
808
809    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
810	/* sleep on the command */
811	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) {
812	    tsleep(mc, PRIBIO, "mlywait", 0);
813	}
814    } else {
815	/* spin and collect status while we do */
816	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE)
817	    mly_done(mc->mc_sc);
818    }
819    splx(s);
820    return(0);
821}
822
823/********************************************************************************
824 * Start as much queued I/O as possible on the controller
825 */
826void
827mly_startio(struct mly_softc *sc)
828{
829    struct mly_command	*mc;
830
831    debug_called(2);
832
833    for (;;) {
834
835	/* try for a ready command */
836	mc = mly_dequeue_ready(sc);
837
838	/* try to build a command from a queued ccb */
839	if (!mc)
840	    mly_cam_command(sc, &mc);
841
842	/* no command == nothing to do */
843	if (!mc)
844	    break;
845
846	/* try to post the command */
847	if (mly_start(mc)) {
848	    /* controller busy, or no resources - defer for later */
849	    mly_requeue_ready(mc);
850	    break;
851	}
852    }
853}
854
855/********************************************************************************
856 * Deliver a command to the controller; allocate controller resources at the
857 * last moment.
858 */
859static int
860mly_start(struct mly_command *mc)
861{
862    struct mly_softc		*sc = mc->mc_sc;
863    union mly_command_packet	*pkt;
864    int				s;
865
866    debug_called(2);
867
868    /*
869     * Set the command up for delivery to the controller.  This may fail
870     * due to resource shortages.
871     */
872    if (mly_get_slot(mc))
873	return(EBUSY);
874    mly_map_command(mc);
875
876    s = splcam();
877    /*
878     * Do we have to use the hardware mailbox?
879     */
880    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
881	/*
882	 * Check to see if the controller is ready for us.
883	 */
884	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
885	    splx(s);
886	    return(EBUSY);
887	}
888
889	/*
890	 * It's ready, send the command.
891	 */
892	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
893	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
894
895    } else {	/* use memory-mailbox mode */
896
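	/*
	 * The memory mailbox is a simple ring: a slot whose flag byte is zero
	 * is free for us to use.  We copy the packet data in, set the flag,
	 * then ring the doorbell; the submission index advances modulo
	 * MLY_MMBOX_COMMANDS.
	 */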
897	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
898
899	/* check to see if the next slot is free yet */
900	if (pkt->mmbox.flag != 0) {
901	    splx(s);
902	    return(EBUSY);
903	}
904
905	/* copy in new command */
906	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
907	/* barrier to ensure completion of previous write before we write the flag */
908	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
909	/* copy flag last */
910	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
911	/* barrier to ensure completion of previous write before we notify the controller */
912	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */
913
914	/* signal controller, update index */
915	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
916	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
917    }
918
919    splx(s);
920    return(0);
921}
922
923/********************************************************************************
924 * Pick up command status from the controller, schedule a completion event
925 */
926void
927mly_done(struct mly_softc *sc)
928{
929    struct mly_command		*mc;
930    union mly_status_packet	*sp;
931    u_int16_t			slot;
932    int				s, worked;
933
934    s = splcam();
935    worked = 0;
936
937    /* pick up hardware-mailbox commands */
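    /*
     * The hardware status mailbox is read as a 16-bit command id (slot),
     * a status byte, a sense-length byte and a 32-bit residue, matching the
     * fields picked out of mly_status_packet in the memory-mailbox path below.
     */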
938    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
939	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
940	if (slot < MLY_SLOT_MAX) {
941	    mc = sc->mly_busycmds[slot];
942	    if (mc != NULL) {
943		mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
944		mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
945		mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
946		mly_enqueue_completed(mc);
947		sc->mly_busycmds[slot] = NULL;
948		worked = 1;
949	    } else {
950		mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot);
951	    }
952	} else {
953	    /* slot 0xffff may mean "extremely bogus command" */
954	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
955	}
956	/* unconditionally acknowledge status */
957	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
958	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
959    }
960
961    /* pick up memory-mailbox commands */
962    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
963	for (;;) {
964	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
965
966	    /* check for more status */
967	    if (sp->mmbox.flag == 0)
968		break;
969
970	    /* get slot number */
971	    slot = sp->status.command_id;
972	    if (slot < MLY_SLOT_MAX) {
973		mc = sc->mly_busycmds[slot];
974		if (mc != NULL) {
975		    mc->mc_status = sp->status.status;
976		    mc->mc_sense = sp->status.sense_length;
977		    mc->mc_resid = sp->status.residue;
978		    mly_enqueue_completed(mc);
979		    sc->mly_busycmds[slot] = NULL;
980		    worked = 1;
981		} else {
982		    mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot);
983		}
984	    } else {
985		/* slot 0xffff may mean "extremely bogus command" */
986		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index);
987	    }
988
989	    /* clear and move to next slot */
990	    sp->mmbox.flag = 0;
991	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
992	}
993	/* acknowledge that we have collected status value(s) */
994	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
995    }
996
997    splx(s);
998    if (worked) {
999#if __FreeBSD_version >= 500005
1000	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
1001	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
1002	else
1003#endif
1004	    mly_complete(sc, 0);
1005    }
1006}
1007
1008/********************************************************************************
1009 * Process completed commands
1010 */
1011static void
1012mly_complete(void *context, int pending)
1013{
1014    struct mly_softc	*sc = (struct mly_softc *)context;
1015    struct mly_command	*mc;
1016    void	        (* mc_complete)(struct mly_command *mc);
1017
1018
1019    debug_called(2);
1020
1021    /*
1022     * Spin pulling commands off the completed queue and processing them.
1023     */
1024    while ((mc = mly_dequeue_completed(sc)) != NULL) {
1025
1026	/*
1027	 * Free controller resources, mark command complete.
1028	 *
1029	 * Note that as soon as we mark the command complete, it may be freed
1030	 * out from under us, so we need to save the mc_complete field in
1031	 * order to later avoid dereferencing mc.  (We would not expect to
1032	 * have a polling/sleeping consumer with mc_complete != NULL).
1033	 */
1034	mly_unmap_command(mc);
1035	mc_complete = mc->mc_complete;
1036	MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE);
1037
1038	/*
1039	 * Call completion handler or wake up sleeping consumer.
1040	 */
1041	if (mc_complete != NULL) {
1042	    mc_complete(mc);
1043	} else {
1044	    wakeup(mc);
1045	}
1046    }
1047
1048    /*
1049     * We may have freed up controller resources which would allow us
1050     * to push more commands onto the controller, so we check here.
1051     */
1052    mly_startio(sc);
1053
1054    /*
1055     * The controller may have updated the health status information,
1056     * so check for it here.
1057     *
1058     * Note that we only check for health status after a completed command.  It
1059     * might be wise to ping the controller occasionally if it's been idle for
1060     * a while just to check up on it.  While a filesystem is mounted or I/O is
1061     * active, this isn't really an issue.
1062     */
1063    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1064	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1065	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1066	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1067	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1068    }
1069    if (sc->mly_event_counter != sc->mly_event_waiting)
1070	mly_fetch_event(sc);
1071}
1072
1073/********************************************************************************
1074 ********************************************************************************
1075                                                        Command Buffer Management
1076 ********************************************************************************
1077 ********************************************************************************/
1078
1079/********************************************************************************
1080 * Give a command a slot in our lookup table, so that we can recover it when
1081 * the controller returns the slot number.
1082 *
1083 * Slots are freed in mly_done().
1084 */
1085static int
1086mly_get_slot(struct mly_command *mc)
1087{
1088    struct mly_softc	*sc = mc->mc_sc;
1089    u_int16_t		slot;
1090    int			tries;
1091
1092    debug_called(3);
1093
1094    if (mc->mc_flags & MLY_CMD_SLOTTED)
1095	return(0);
1096
1097    /*
1098     * Optimisation for the controller-busy case - check to see whether
1099     * we are already over the limit and stop immediately.
1100     */
1101    if (sc->mly_busy_count >= sc->mly_max_commands)
1102	return(EBUSY);
1103
1104    /*
1105     * Scan forward from the last slot that we assigned looking for a free
1106     * slot.  Don't scan more than the maximum number of commands that we
1107     * support (we should never reach the limit here due to the optimisation
1108     * above)
1109     */
1110    slot = sc->mly_last_slot;
1111    for (tries = sc->mly_max_commands; tries > 0; tries--) {
1112	if (sc->mly_busycmds[slot] == NULL) {
1113	    sc->mly_busycmds[slot] = mc;
1114	    mc->mc_slot = slot;
1115	    mc->mc_packet->generic.command_id = slot;
1116	    mc->mc_flags |= MLY_CMD_SLOTTED;
1117	    sc->mly_last_slot = slot;
1118	    return(0);
1119	}
1120	slot++;
1121	if (slot >= MLY_SLOT_MAX)
1122	    slot = MLY_SLOT_START;
1123    }
1124    return(EBUSY);
1125}
1126
1127/********************************************************************************
1128 * Allocate a command.
1129 */
1130int
1131mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
1132{
1133    struct mly_command	*mc;
1134
1135    debug_called(3);
1136
1137    if ((mc = mly_dequeue_free(sc)) == NULL) {
1138	mly_alloc_command_cluster(sc);
1139	mc = mly_dequeue_free(sc);
1140    }
1143
1144    if (mc == NULL)
1145	return(ENOMEM);
1146
1147    MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
1148    *mcp = mc;
1149    return(0);
1150}
1151
1152/********************************************************************************
1153 * Release a command back to the freelist.
1154 */
1155void
1156mly_release_command(struct mly_command *mc)
1157{
1158    debug_called(3);
1159
1160    /*
1161     * Reset parts of the command that might otherwise confuse the next
1162     * consumer when the command is later reallocated.
1163     */
1164    MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
1165    mc->mc_data = NULL;
1166    mc->mc_flags = 0;
1167    mc->mc_complete = NULL;
1168    mc->mc_private = NULL;
1169
1170    /*
1171     * By default, we set up to overwrite the command packet with
1172     * sense information.
1173     */
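    /*
     * (mly_ioctl() relies on these defaults: it copies the sense fields from
     * the freshly allocated packet into the caller's ioctl template before
     * overwriting the packet with that template.)
     */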
1174    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
1175    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
1176
1177    mly_enqueue_free(mc);
1178}
1179
1180/********************************************************************************
1181 * Map helper for command cluster allocation.
1182 *
1183 * Note that there are never more command packets in a cluster than will fit in
1184 * a page, so there is no need to look at anything other than the base of the
1185 * allocation (which will be page-aligned).
1186 */
1187static void
1188mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1189{
1190    struct mly_command_cluster	*mcc = (struct mly_command_cluster *)arg;
1191
1192    debug_called(2);
1193
1194    mcc->mcc_packetphys = segs[0].ds_addr;
1195}
1196
1197/********************************************************************************
1198 * Allocate and initialise a cluster of commands.
1199 */
1200static void
1201mly_alloc_command_cluster(struct mly_softc *sc)
1202{
1203    struct mly_command_cluster	*mcc;
1204    struct mly_command		*mc;
1205    int				i;
1206
1207    debug_called(1);
1208
1209    mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT);
1210    if (mcc != NULL) {
1211
1212	/*
1213	 * Allocate enough space for all the command packets for this cluster and
1214	 * map them permanently into controller-visible space.
1215	 */
1216	if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet,
1217			     BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) {
1218	    free(mcc, M_DEVBUF);
1219	    return;
1220	}
1221	bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet,
1222			MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet),
1223			mly_alloc_command_cluster_map, mcc, 0);
1224
1225	mly_enqueue_cluster(sc, mcc);
1226	for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) {
1227	    mc = &mcc->mcc_command[i];
1228	    bzero(mc, sizeof(*mc));
1229	    mc->mc_sc = sc;
1230	    mc->mc_packet = mcc->mcc_packet + i;
1231	    mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet));
1232	    if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1233		mly_release_command(mc);
1234	}
1235    }
1236}
1237
1238/********************************************************************************
1239 * Command-mapping helper function - populate this command slot's s/g table
1240 * with the s/g entries for this command.
1241 */
1242static void
1243mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1244{
1245    struct mly_command		*mc = (struct mly_command *)arg;
1246    struct mly_softc		*sc = mc->mc_sc;
1247    struct mly_command_generic	*gen = &(mc->mc_packet->generic);
1248    struct mly_sg_entry		*sg;
1249    int				i, tabofs;
1250
1251    debug_called(3);
1252
1253    /* can we use the transfer structure directly? */
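    /*
     * Transfers with one or two segments fit in the inline s/g entries of the
     * command packet; larger transfers use this slot's region of up to
     * MLY_MAXSGENTRIES entries in the shared s/g table.
     */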
1254    if (nseg <= 2) {
1255	sg = &gen->transfer.direct.sg[0];
1256	gen->command_control.extended_sg_table = 0;
1257    } else {
1258	tabofs = (mc->mc_slot * MLY_MAXSGENTRIES);
1259	sg = sc->mly_sg_table + tabofs;
1260	gen->transfer.indirect.entries[0] = nseg;
1261	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1262	gen->command_control.extended_sg_table = 1;
1263    }
1264
1265    /* copy the s/g table */
1266    for (i = 0; i < nseg; i++) {
1267	sg[i].physaddr = segs[i].ds_addr;
1268	sg[i].length = segs[i].ds_len;
1269    }
1270
1271}
1272
1273#if 0
1274/********************************************************************************
1275 * Command-mapping helper function - save the cdb's physical address.
1276 *
1277 * We don't support 'large' SCSI commands at this time, so this is unused.
1278 */
1279static void
1280mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1281{
1282    struct mly_command			*mc = (struct mly_command *)arg;
1283
1284    debug_called(3);
1285
1286    /* XXX can we safely assume that a CDB will never cross a page boundary? */
1287    if ((segs[0].ds_addr % PAGE_SIZE) >
1288	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1289	panic("cdb crosses page boundary");
1290
1291    /* fix up fields in the command packet */
1292    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
1293}
1294#endif
1295
1296/********************************************************************************
1297 * Map a command into controller-visible space
1298 */
1299static void
1300mly_map_command(struct mly_command *mc)
1301{
1302    struct mly_softc	*sc = mc->mc_sc;
1303
1304    debug_called(2);
1305
1306    /* don't map more than once */
1307    if (mc->mc_flags & MLY_CMD_MAPPED)
1308	return;
1309
1310    /* does the command have a data buffer? */
1311    if (mc->mc_data != NULL)
1312	bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1313			mly_map_command_sg, mc, 0);
1314
1315    if (mc->mc_flags & MLY_CMD_DATAIN)
1316	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1317    if (mc->mc_flags & MLY_CMD_DATAOUT)
1318	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1319
1320    mc->mc_flags |= MLY_CMD_MAPPED;
1321}
1322
1323/********************************************************************************
1324 * Unmap a command from controller-visible space
1325 */
1326static void
1327mly_unmap_command(struct mly_command *mc)
1328{
1329    struct mly_softc	*sc = mc->mc_sc;
1330
1331    debug_called(2);
1332
1333    if (!(mc->mc_flags & MLY_CMD_MAPPED))
1334	return;
1335
1336    if (mc->mc_flags & MLY_CMD_DATAIN)
1337	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1338    if (mc->mc_flags & MLY_CMD_DATAOUT)
1339	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1340
1341    /* does the command have a data buffer? */
1342    if (mc->mc_data != NULL)
1343	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1344
1345    mc->mc_flags &= ~MLY_CMD_MAPPED;
1346}
1347
1348/********************************************************************************
1349 ********************************************************************************
1350                                                                 Hardware Control
1351 ********************************************************************************
1352 ********************************************************************************/
1353
1354/********************************************************************************
1355 * Handshake with the firmware while the card is being initialised.
1356 */
1357static int
1358mly_fwhandshake(struct mly_softc *sc)
1359{
1360    u_int8_t	error, param0, param1;
1361    int		spinup = 0;
1362
1363    debug_called(1);
1364
1365    /* set HM_STSACK and let the firmware initialise */
1366    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
1367    DELAY(1000);	/* too short? */
1368
1369    /* if HM_STSACK is still true, the controller is initialising */
1370    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
1371	return(0);
1372    mly_printf(sc, "controller initialisation started\n");
1373
1374    /* spin waiting for initialisation to finish, or for a message to be delivered */
1375    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
1376	/* check for a message */
1377	if (MLY_ERROR_VALID(sc)) {
1378	    error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
1379	    param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
1380	    param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
1381
1382	    switch(error) {
1383	    case MLY_MSG_SPINUP:
1384		if (!spinup) {
1385		    mly_printf(sc, "drive spinup in progress\n");
1386		    spinup = 1;			/* only print this once (should print drive being spun?) */
1387		}
1388		break;
1389	    case MLY_MSG_RACE_RECOVERY_FAIL:
1390		mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
1391		break;
1392	    case MLY_MSG_RACE_IN_PROGRESS:
1393		mly_printf(sc, "mirror race recovery in progress\n");
1394		break;
1395	    case MLY_MSG_RACE_ON_CRITICAL:
1396		mly_printf(sc, "mirror race recovery on a critical drive\n");
1397		break;
1398	    case MLY_MSG_PARITY_ERROR:
1399		mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
1400		return(ENXIO);
1401	    default:
1402		mly_printf(sc, "unknown initialisation code 0x%x\n", error);
1403	    }
1404	}
1405    }
1406    return(0);
1407}
1408
1409/********************************************************************************
1410 ********************************************************************************
1411                                                        Debugging and Diagnostics
1412 ********************************************************************************
1413 ********************************************************************************/
1414
1415/********************************************************************************
1416 * Print some information about the controller.
1417 */
1418static void
1419mly_describe_controller(struct mly_softc *sc)
1420{
1421    struct mly_ioctl_getcontrollerinfo	*mi = sc->mly_controllerinfo;
1422
1423    mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
1424	       mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
1425	       mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,	/* XXX turn encoding? */
1426	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
1427	       mi->memory_size);
1428
1429    if (bootverbose) {
1430	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
1431		   mly_describe_code(mly_table_oemname, mi->oem_information),
1432		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
1433		   mi->interface_speed, mi->interface_width, mi->interface_name);
1434	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
1435		   mi->memory_size, mi->memory_speed, mi->memory_width,
1436		   mly_describe_code(mly_table_memorytype, mi->memory_type),
1437		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
1438		   mi->cache_size);
1439	mly_printf(sc, "CPU: %s @ %dMHZ\n",
1440		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
1441	if (mi->l2cache_size != 0)
1442	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
1443	if (mi->exmemory_size != 0)
1444	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
1445		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
1446		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
1447		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
1448	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
1449	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
1450		   mi->maximum_block_count, mi->maximum_sg_entries);
1451	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
1452		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
1453	mly_printf(sc, "physical devices present %d\n",
1454		   mi->physical_devices_present);
1455	mly_printf(sc, "physical disks present/offline %d/%d\n",
1456		   mi->physical_disks_present, mi->physical_disks_offline);
1457	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
1458		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
1459		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
1460		   mi->virtual_channels_possible);
1461	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
1462	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
1463		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
1464    }
1465}
1466
1467#ifdef MLY_DEBUG
1468/********************************************************************************
1469 * Print some controller state
1470 */
1471static void
1472mly_printstate(struct mly_softc *sc)
1473{
1474    mly_printf(sc, "IDBR %02x  ODBR %02x  ERROR %02x  (%x %x %x)\n",
1475		  MLY_GET_REG(sc, sc->mly_idbr),
1476		  MLY_GET_REG(sc, sc->mly_odbr),
1477		  MLY_GET_REG(sc, sc->mly_error_status),
1478		  sc->mly_idbr,
1479		  sc->mly_odbr,
1480		  sc->mly_error_status);
1481    mly_printf(sc, "IMASK %02x  ISTATUS %02x\n",
1482		  MLY_GET_REG(sc, sc->mly_interrupt_mask),
1483		  MLY_GET_REG(sc, sc->mly_interrupt_status));
1484    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
1485		  MLY_GET_REG(sc, sc->mly_command_mailbox),
1486		  MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
1487		  MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
1488		  MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
1489		  MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
1490		  MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
1491		  MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
1492		  MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
1493    mly_printf(sc, "STATUS  %02x %02x %02x %02x %02x %02x %02x %02x\n",
1494		  MLY_GET_REG(sc, sc->mly_status_mailbox),
1495		  MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
1496		  MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
1497		  MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
1498		  MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
1499		  MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
1500		  MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
1501		  MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
1502    mly_printf(sc, "        %04x        %08x\n",
1503		  MLY_GET_REG2(sc, sc->mly_status_mailbox),
1504		  MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
1505}
1506
1507struct mly_softc	*mly_softc0 = NULL;
1508void
1509mly_printstate0(void)
1510{
1511    if (mly_softc0 != NULL)
1512	mly_printstate(mly_softc0);
1513}
1514
1515/********************************************************************************
1516 * Print a command
1517 */
1518static void
1519mly_print_command(struct mly_command *mc)
1520{
1521    struct mly_softc	*sc = mc->mc_sc;
1522
1523    mly_printf(sc, "COMMAND @ %p\n", mc);
1524    mly_printf(sc, "  slot      %d\n", mc->mc_slot);
1525    mly_printf(sc, "  state     %d\n", MLY_CMD_STATE(mc));
1526    mly_printf(sc, "  status    0x%x\n", mc->mc_status);
1527    mly_printf(sc, "  sense len %d\n", mc->mc_sense);
1528    mly_printf(sc, "  resid     %d\n", mc->mc_resid);
1529    mly_printf(sc, "  packet    %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
1530    if (mc->mc_packet != NULL)
1531	mly_print_packet(mc);
1532    mly_printf(sc, "  data      %p/%d\n", mc->mc_data, mc->mc_length);
1533    mly_printf(sc, "  flags     %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n");
1534    mly_printf(sc, "  complete  %p\n", mc->mc_complete);
1535    mly_printf(sc, "  private   %p\n", mc->mc_private);
1536}
1537
1538/********************************************************************************
1539 * Print a command packet
1540 */
1541static void
1542mly_print_packet(struct mly_command *mc)
1543{
1544    struct mly_softc			*sc = mc->mc_sc;
1545    struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
1546    struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
1547    struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
1548    struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
1549    int					transfer;
1550
1551    mly_printf(sc, "   command_id           %d\n", ge->command_id);
1552    mly_printf(sc, "   opcode               %d\n", ge->opcode);
1553    mly_printf(sc, "   command_control      fua %d  dpo %d  est %d  dd %s  nas %d ddis %d\n",
1554		  ge->command_control.force_unit_access,
1555		  ge->command_control.disable_page_out,
1556		  ge->command_control.extended_sg_table,
1557		  (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
1558		  ge->command_control.no_auto_sense,
1559		  ge->command_control.disable_disconnect);
1560    mly_printf(sc, "   data_size            %d\n", ge->data_size);
1561    mly_printf(sc, "   sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
1562    mly_printf(sc, "   lun                  %d\n", ge->addr.phys.lun);
1563    mly_printf(sc, "   target               %d\n", ge->addr.phys.target);
1564    mly_printf(sc, "   channel              %d\n", ge->addr.phys.channel);
1565    mly_printf(sc, "   logical device       %d\n", ge->addr.log.logdev);
1566    mly_printf(sc, "   controller           %d\n", ge->addr.phys.controller);
1567    mly_printf(sc, "   timeout              %d %s\n",
1568		  ge->timeout.value,
1569		  (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
1570		  ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
1571    mly_printf(sc, "   maximum_sense_size   %d\n", ge->maximum_sense_size);
1572    switch(ge->opcode) {
1573    case MDACMD_SCSIPT:
1574    case MDACMD_SCSI:
1575	mly_printf(sc, "   cdb length           %d\n", ss->cdb_length);
1576	mly_printf(sc, "   cdb                  %*D\n", ss->cdb_length, ss->cdb, " ");
1577	transfer = 1;
1578	break;
1579    case MDACMD_SCSILC:
1580    case MDACMD_SCSILCPT:
1581	mly_printf(sc, "   cdb length           %d\n", sl->cdb_length);
1582	mly_printf(sc, "   cdb                  0x%llx\n", sl->cdb_physaddr);
1583	transfer = 1;
1584	break;
1585    case MDACMD_IOCTL:
1586	mly_printf(sc, "   sub_ioctl            0x%x\n", io->sub_ioctl);
1587	switch(io->sub_ioctl) {
1588	case MDACIOCTL_SETMEMORYMAILBOX:
1589	    mly_printf(sc, "   health_buffer_size   %d\n",
1590			  io->param.setmemorymailbox.health_buffer_size);
1591	    mly_printf(sc, "   health_buffer_phys   0x%llx\n",
1592			  io->param.setmemorymailbox.health_buffer_physaddr);
1593	    mly_printf(sc, "   command_mailbox      0x%llx\n",
1594			  io->param.setmemorymailbox.command_mailbox_physaddr);
1595	    mly_printf(sc, "   status_mailbox       0x%llx\n",
1596			  io->param.setmemorymailbox.status_mailbox_physaddr);
1597	    transfer = 0;
1598	    break;
1599
1600	case MDACIOCTL_SETREALTIMECLOCK:
1601	case MDACIOCTL_GETHEALTHSTATUS:
1602	case MDACIOCTL_GETCONTROLLERINFO:
1603	case MDACIOCTL_GETLOGDEVINFOVALID:
1604	case MDACIOCTL_GETPHYSDEVINFOVALID:
1605	case MDACIOCTL_GETPHYSDEVSTATISTICS:
1606	case MDACIOCTL_GETLOGDEVSTATISTICS:
1607	case MDACIOCTL_GETCONTROLLERSTATISTICS:
1608	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
1609	case MDACIOCTL_CREATENEWCONF:
1610	case MDACIOCTL_ADDNEWCONF:
1611	case MDACIOCTL_GETDEVCONFINFO:
1612	case MDACIOCTL_GETFREESPACELIST:
1613	case MDACIOCTL_MORE:
1614	case MDACIOCTL_SETPHYSDEVPARAMETER:
1615	case MDACIOCTL_GETPHYSDEVPARAMETER:
1616	case MDACIOCTL_GETLOGDEVPARAMETER:
1617	case MDACIOCTL_SETLOGDEVPARAMETER:
1618	    mly_printf(sc, "   param                %10D\n", io->param.data.param, " ");
1619	    transfer = 1;
1620	    break;
1621
1622	case MDACIOCTL_GETEVENT:
1623	    mly_printf(sc, "   event                %d\n",
1624		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
1625	    transfer = 1;
1626	    break;
1627
1628	case MDACIOCTL_SETRAIDDEVSTATE:
1629	    mly_printf(sc, "   state                %d\n", io->param.setraiddevstate.state);
1630	    transfer = 0;
1631	    break;
1632
1633	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
1634	    mly_printf(sc, "   raid_device          %d\n", io->param.xlatephysdevtoraiddev.raid_device);
1635	    mly_printf(sc, "   controller           %d\n", io->param.xlatephysdevtoraiddev.controller);
1636	    mly_printf(sc, "   channel              %d\n", io->param.xlatephysdevtoraiddev.channel);
1637	    mly_printf(sc, "   target               %d\n", io->param.xlatephysdevtoraiddev.target);
1638	    mly_printf(sc, "   lun                  %d\n", io->param.xlatephysdevtoraiddev.lun);
1639	    transfer = 0;
1640	    break;
1641
1642	case MDACIOCTL_GETGROUPCONFINFO:
1643	    mly_printf(sc, "   group                %d\n", io->param.getgroupconfinfo.group);
1644	    transfer = 1;
1645	    break;
1646
1647	case MDACIOCTL_GET_SUBSYSTEM_DATA:
1648	case MDACIOCTL_SET_SUBSYSTEM_DATA:
1649	case MDACIOCTL_STARTDISOCVERY:
1650	case MDACIOCTL_INITPHYSDEVSTART:
1651	case MDACIOCTL_INITPHYSDEVSTOP:
1652	case MDACIOCTL_INITRAIDDEVSTART:
1653	case MDACIOCTL_INITRAIDDEVSTOP:
1654	case MDACIOCTL_REBUILDRAIDDEVSTART:
1655	case MDACIOCTL_REBUILDRAIDDEVSTOP:
1656	case MDACIOCTL_MAKECONSISTENTDATASTART:
1657	case MDACIOCTL_MAKECONSISTENTDATASTOP:
1658	case MDACIOCTL_CONSISTENCYCHECKSTART:
1659	case MDACIOCTL_CONSISTENCYCHECKSTOP:
1660	case MDACIOCTL_RESETDEVICE:
1661	case MDACIOCTL_FLUSHDEVICEDATA:
1662	case MDACIOCTL_PAUSEDEVICE:
1663	case MDACIOCTL_UNPAUSEDEVICE:
1664	case MDACIOCTL_LOCATEDEVICE:
1665	case MDACIOCTL_SETMASTERSLAVEMODE:
1666	case MDACIOCTL_DELETERAIDDEV:
1667	case MDACIOCTL_REPLACEINTERNALDEV:
1668	case MDACIOCTL_CLEARCONF:
1669	case MDACIOCTL_GETCONTROLLERPARAMETER:
1670	case MDACIOCTL_SETCONTRLLERPARAMETER:
1671	case MDACIOCTL_CLEARCONFSUSPMODE:
1672	case MDACIOCTL_STOREIMAGE:
1673	case MDACIOCTL_READIMAGE:
1674	case MDACIOCTL_FLASHIMAGES:
1675	case MDACIOCTL_RENAMERAIDDEV:
1676	default:			/* no idea what to print */
1677	    transfer = 0;
1678	    break;
1679	}
1680	break;
1681
1682    case MDACMD_IOCTLCHECK:
1683    case MDACMD_MEMCOPY:
1684    default:
1685	transfer = 0;
1686	break;	/* print nothing */
1687    }
1688    if (transfer) {
1689	if (ge->command_control.extended_sg_table) {
1690	    mly_printf(sc, "   sg table             0x%llx/%d\n",
1691			  ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
1692	} else {
1693	    mly_printf(sc, "   0000                 0x%llx/%lld\n",
1694			  ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
1695	    mly_printf(sc, "   0001                 0x%llx/%lld\n",
1696			  ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
1697	}
1698    }
1699}
1700
1701/********************************************************************************
1702 * Panic in a slightly informative fashion
1703 */
1704static void
1705mly_panic(struct mly_softc *sc, char *reason)
1706{
1707    mly_printstate(sc);
1708    panic(reason);
1709}
1710#endif
1711