mly.c revision 68877
1/*-
2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *	$FreeBSD: head/sys/dev/mly/mly.c 68877 2000-11-18 15:21:22Z dwmalone $
28 */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/malloc.h>
33#include <sys/kernel.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/ctype.h>
37
38#include <machine/bus_memio.h>
39#include <machine/bus.h>
40#include <machine/resource.h>
41#include <sys/rman.h>
42
43#include <cam/scsi/scsi_all.h>
44
45#include <dev/mly/mlyreg.h>
46#include <dev/mly/mlyvar.h>
47#define MLY_DEFINE_TABLES
48#include <dev/mly/mly_tables.h>
49
50static int	mly_get_controllerinfo(struct mly_softc *sc);
51static void	mly_scan_devices(struct mly_softc *sc);
52static void	mly_rescan_btl(struct mly_softc *sc, int bus, int target);
53static void	mly_complete_rescan(struct mly_command *mc);
54static int	mly_get_eventstatus(struct mly_softc *sc);
55static int	mly_enable_mmbox(struct mly_softc *sc);
56static int	mly_flush(struct mly_softc *sc);
57static int	mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
58			  size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
59static void	mly_fetch_event(struct mly_softc *sc);
60static void	mly_complete_event(struct mly_command *mc);
61static void	mly_process_event(struct mly_softc *sc, struct mly_event *me);
62static void	mly_periodic(void *data);
63
64static int	mly_immediate_command(struct mly_command *mc);
65static int	mly_start(struct mly_command *mc);
66static void	mly_complete(void *context, int pending);
67
68static int	mly_get_slot(struct mly_command *mc);
69static void	mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
70static void	mly_alloc_command_cluster(struct mly_softc *sc);
71static void	mly_map_command(struct mly_command *mc);
72static void	mly_unmap_command(struct mly_command *mc);
73
74static int	mly_fwhandshake(struct mly_softc *sc);
75
76static void	mly_describe_controller(struct mly_softc *sc);
77#ifdef MLY_DEBUG
78static void	mly_printstate(struct mly_softc *sc);
79static void	mly_print_command(struct mly_command *mc);
80static void	mly_print_packet(struct mly_command *mc);
81static void	mly_panic(struct mly_softc *sc, char *reason);
82#endif
83
84/********************************************************************************
85 ********************************************************************************
86                                                                 Device Interface
87 ********************************************************************************
88 ********************************************************************************/
89
90/********************************************************************************
91 * Initialise the controller and softc
92 */
93int
94mly_attach(struct mly_softc *sc)
95{
96    int		error;
97
98    debug_called(1);
99
100    /*
101     * Initialise per-controller queues.
102     */
103    TAILQ_INIT(&sc->mly_freecmds);
104    TAILQ_INIT(&sc->mly_ready);
105    TAILQ_INIT(&sc->mly_completed);
106    TAILQ_INIT(&sc->mly_clusters);
107
108#if __FreeBSD_version >= 500005
109    /*
110     * Initialise command-completion task.
111     */
112    TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
113#endif
114
115    /* disable interrupts before we start talking to the controller */
116    MLY_MASK_INTERRUPTS(sc);
117
118    /*
119     * Wait for the controller to come ready, handshake with the firmware if required.
120     * This is typically only necessary on platforms where the controller BIOS does not
121     * run.
122     */
123    if ((error = mly_fwhandshake(sc)))
124	return(error);
125
126    /*
127     * Initialise the slot allocator so that we can issue commands.
128     */
129    sc->mly_max_commands = MLY_SLOT_MAX;
130    sc->mly_last_slot = MLY_SLOT_START;
131
132    /*
133     * Obtain controller feature information
134     */
135    if ((error = mly_get_controllerinfo(sc)))
136	return(error);
137
138    /*
139     * Update the slot allocator limit based on the controller inquiry.
140     */
141    sc->mly_max_commands = imin(sc->mly_controllerinfo->maximum_parallel_commands, MLY_SLOT_MAX);
142
143    /*
144     * Get the current event counter for health purposes, populate the initial
145     * health status buffer.
146     */
147    if ((error = mly_get_eventstatus(sc)))
148	return(error);
149
150    /*
151     * Enable memory-mailbox mode
152     */
153    if ((error = mly_enable_mmbox(sc)))
154	return(error);
155
156    /*
157     * Attach to CAM.
158     */
159    if ((error = mly_cam_attach(sc)))
160	return(error);
161
162    /*
163     * Print a little information about the controller
164     */
165    mly_describe_controller(sc);
166
167    /*
168     * Mark all attached devices for rescan
169     */
170    mly_scan_devices(sc);
171
172    /*
173     * Instigate the first status poll immediately.  Rescan completions won't
174     * happen until interrupts are enabled, which should still be before
175     * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
176     * discovery here...)
177     */
178    mly_periodic((void *)sc);
179
180    /* enable interrupts now */
181    MLY_UNMASK_INTERRUPTS(sc);
182
183    return(0);
184}
185
186/********************************************************************************
187 * Bring the controller to a state where it can be safely left alone.
188 */
189void
190mly_detach(struct mly_softc *sc)
191{
192
193    debug_called(1);
194
195    /* kill the periodic event */
196    untimeout(mly_periodic, sc, sc->mly_periodic);
197
198    sc->mly_state |= MLY_STATE_SUSPEND;
199
200    /* flush controller */
201    mly_printf(sc, "flushing cache...");
202    printf("%s\n", mly_flush(sc) ? "failed" : "done");
203
204    MLY_MASK_INTERRUPTS(sc);
205}
206
207/********************************************************************************
208 ********************************************************************************
209                                                                 Command Wrappers
210 ********************************************************************************
211 ********************************************************************************/
212
213/********************************************************************************
214 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
215 */
216static int
217mly_get_controllerinfo(struct mly_softc *sc)
218{
219    struct mly_command_ioctl	mci;
220    u_int8_t			status;
221    int				error;
222
223    debug_called(1);
224
225    if (sc->mly_controllerinfo != NULL)
226	free(sc->mly_controllerinfo, M_DEVBUF);
227
228    /* build the getcontrollerinfo ioctl and send it */
229    bzero(&mci, sizeof(mci));
230    sc->mly_controllerinfo = NULL;
231    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
232    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
233			   &status, NULL, NULL)))
234	return(error);
235    if (status != 0)
236	return(EIO);
237
238    if (sc->mly_controllerparam != NULL)
239	free(sc->mly_controllerparam, M_DEVBUF);
240
241    /* build the getcontrollerparameter ioctl and send it */
242    bzero(&mci, sizeof(mci));
243    sc->mly_controllerparam = NULL;
244    mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
245    if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
246			   &status, NULL, NULL)))
247	return(error);
248    if (status != 0)
249	return(EIO);
250
251    return(0);
252}
253
254/********************************************************************************
255 * Schedule all possible devices for a rescan.
256 *
257 */
258static void
259mly_scan_devices(struct mly_softc *sc)
260{
261    int		bus, target, nchn;
262
263    debug_called(1);
264
265    /*
266     * Clear any previous BTL information.
267     */
268    bzero(&sc->mly_btl, sizeof(sc->mly_btl));
269
270    /*
271     * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
272     */
273    nchn = sc->mly_controllerinfo->physical_channels_present +
274	sc->mly_controllerinfo->virtual_channels_present;
275    for (bus = 0; bus < nchn; bus++)
276	for (target = 0; target < MLY_MAX_TARGETS; target++)
277	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
278
279}
280
281/********************************************************************************
282 * Rescan a device, possibly as a consequence of getting an event which suggests
283 * that it may have changed.
284 */
285static void
286mly_rescan_btl(struct mly_softc *sc, int bus, int target)
287{
288    struct mly_command		*mc;
289    struct mly_command_ioctl	*mci;
290
291    debug_called(2);
292
293    /* get a command */
294    mc = NULL;
295    if (mly_alloc_command(sc, &mc))
296	return;				/* we'll be retried soon */
297
298    /* set up the data buffer */
299    if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
300	mly_release_command(mc);
301	return;				/* we'll get retried the next time a command completes */
302    }
303    mc->mc_flags |= MLY_CMD_DATAIN;
304    mc->mc_complete = mly_complete_rescan;
305
306    sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
307
308    /*
309     * Build the ioctl.
310     *
311     * At this point we are committed to sending this request, as it
312     * will be the only one constructed for this particular update.
313     */
314    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
315    mci->opcode = MDACMD_IOCTL;
316    mci->addr.phys.controller = 0;
317    mci->timeout.value = 30;
318    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
319    if (bus >= sc->mly_controllerinfo->physical_channels_present) {
320	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
321	mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
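	/*
	 * Logical devices are numbered consecutively across the virtual
	 * channels, MLY_MAX_TARGETS per channel, starting after the last
	 * physical channel.
	 */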
322	mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
323	    + target;
324	debug(2, "logical device %d", mci->addr.log.logdev);
325    } else {
326	mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
327	mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
328	mci->addr.phys.lun = 0;
329	mci->addr.phys.target = target;
330	mci->addr.phys.channel = bus;
331	debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
332    }
333
334    /*
335     * Use the ready queue to get this command dispatched.
336     */
337    mly_enqueue_ready(mc);
338    mly_startio(sc);
339}
340
341/********************************************************************************
342 * Handle the completion of a rescan operation
343 */
344static void
345mly_complete_rescan(struct mly_command *mc)
346{
347    struct mly_softc				*sc = mc->mc_sc;
348    struct mly_ioctl_getlogdevinfovalid		*ldi;
349    struct mly_ioctl_getphysdevinfovalid	*pdi;
350    int						bus, target;
351
352    debug_called(2);
353
354    /* iff the command completed OK, we should use the result to update our data */
355    if (mc->mc_status == 0) {
356	if (mc->mc_length == sizeof(*ldi)) {
357	    ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
358	    bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
359	    target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
360	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL;	/* clears all other flags */
361	    sc->mly_btl[bus][target].mb_type = ldi->raid_level;
362	    sc->mly_btl[bus][target].mb_state = ldi->state;
363	    debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
364		  mly_describe_code(mly_table_device_type, ldi->raid_level),
365		  mly_describe_code(mly_table_device_state, ldi->state));
366	} else if (mc->mc_length == sizeof(*pdi)) {
367	    pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
368	    bus = pdi->channel;
369	    target = pdi->target;
370	    sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL;	/* clears all other flags */
371	    sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
372	    sc->mly_btl[bus][target].mb_state = pdi->state;
373	    if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
374		sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
375	    debug(2, "BTL rescan for %d:%d returns %s", bus, target,
376		  mly_describe_code(mly_table_device_state, pdi->state));
377	} else {
378	    mly_printf(sc, "BTL rescan result corrupted\n");
379	}
380    } else {
381	/*
382	 * A request sent for a device beyond the last device present will fail.
383	 * We don't care about this, so we do nothing about it.
384	 */
385    }
386    free(mc->mc_data, M_DEVBUF);
387    mly_release_command(mc);
388}
389
390/********************************************************************************
391 * Get the current health status and set the 'next event' counter to suit.
392 */
393static int
394mly_get_eventstatus(struct mly_softc *sc)
395{
396    struct mly_command_ioctl	mci;
397    struct mly_health_status	*mh;
398    u_int8_t			status;
399    int				error;
400
401    /* build the gethealthstatus ioctl and send it */
402    bzero(&mci, sizeof(mci));
403    mh = NULL;
404    mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
405
406    if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
407	return(error);
408    if (status != 0)
409	return(EIO);
410
411    /* get the event counter */
412    sc->mly_event_change = mh->change_counter;
413    sc->mly_event_waiting = mh->next_event;
414    sc->mly_event_counter = mh->next_event;
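    /*
     * mly_event_counter is the next event sequence number we will request;
     * mly_event_waiting mirrors the controller's 'next_event'.  When the two
     * are equal we have fetched everything the controller has logged.
     */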
415
416    /* save the health status into the memory mailbox */
417    bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
418
419    debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
420
421    free(mh, M_DEVBUF);
422    return(0);
423}
424
425/********************************************************************************
426 * Enable the memory mailbox mode.
427 */
428static int
429mly_enable_mmbox(struct mly_softc *sc)
430{
431    struct mly_command_ioctl	mci;
432    u_int8_t			*sp, status;
433    int				error;
434
435    debug_called(1);
436
437    /* build the ioctl and send it */
438    bzero(&mci, sizeof(mci));
439    mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
440    /* set buffer addresses */
441    mci.param.setmemorymailbox.command_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
442    mci.param.setmemorymailbox.status_mailbox_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
443    mci.param.setmemorymailbox.health_buffer_physaddr = sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
444
445    /* set buffer sizes - abuse of data_size field is revolting */
446    sp = (u_int8_t *)&mci.data_size;
447    sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
448    sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
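    /* sp[0] carries the command mailbox size and sp[1] the status mailbox size, both in kilobytes */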
449    mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
450
451    debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d)", sc->mly_mmbox,
452	  mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
453	  mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
454	  mci.param.setmemorymailbox.health_buffer_physaddr, mci.param.setmemorymailbox.health_buffer_size);
455
456    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
457	return(error);
458    if (status != 0)
459	return(EIO);
460    sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
461    debug(1, "memory mailbox active");
462    return(0);
463}
464
465/********************************************************************************
466 * Flush all pending I/O from the controller.
467 */
468static int
469mly_flush(struct mly_softc *sc)
470{
471    struct mly_command_ioctl	mci;
472    u_int8_t			status;
473    int				error;
474
475    debug_called(1);
476
477    /* build the ioctl */
478    bzero(&mci, sizeof(mci));
479    mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
480    mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
481
482    /* pass it off to the controller */
483    if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
484	return(error);
485
486    return((status == 0) ? 0 : EIO);
487}
488
489/********************************************************************************
490 * Perform an ioctl command.
491 *
492 * If (data) is not NULL, the command requires data transfer.  If (*data) is NULL
493 * the command requires data transfer from the controller, and we will allocate
494 * a buffer for it.  If (*data) is not NULL, the command requires data transfer
495 * to the controller.
496 *
497 * XXX passing in the whole ioctl structure is ugly.  Better ideas?
498 *
499 * XXX we don't even try to handle the case where datasize > 4k.  We should.
500 */
501static int
502mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
503	  u_int8_t *status, void *sense_buffer, size_t *sense_length)
504{
505    struct mly_command		*mc;
506    struct mly_command_ioctl	*mci;
507    int				error;
508
509    debug_called(1);
510
511    mc = NULL;
512    if (mly_alloc_command(sc, &mc)) {
513	error = ENOMEM;
514	goto out;
515    }
516
517    /* copy the ioctl structure, but save some important fields and then fixup */
518    mci = &mc->mc_packet->ioctl;
519    ioctl->sense_buffer_address = mci->sense_buffer_address;
520    ioctl->maximum_sense_size = mci->maximum_sense_size;
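    /*
     * The packet's sense buffer address and size were preset (in
     * mly_release_command) to point back at the packet itself; copying them
     * into the caller's template above preserves them across the structure
     * copy below.
     */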
521    *mci = *ioctl;
522    mci->opcode = MDACMD_IOCTL;
523    mci->timeout.value = 30;
524    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
525
526    /* handle the data buffer */
527    if (data != NULL) {
528	if (*data == NULL) {
529	    /* allocate data buffer */
530	    if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) {
531		error = ENOMEM;
532		goto out;
533	    }
534	    mc->mc_flags |= MLY_CMD_DATAIN;
535	} else {
536	    mc->mc_data = *data;
537	    mc->mc_flags |= MLY_CMD_DATAOUT;
538	}
539	mc->mc_length = datasize;
540	mc->mc_packet->generic.data_size = datasize;
541    }
542
543    /* run the command */
544    if ((error = mly_immediate_command(mc)))
545	goto out;
546
547    /* clean up and return any data */
548    *status = mc->mc_status;
549    if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
550	bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
551	*sense_length = mc->mc_sense;
552	goto out;
553    }
554
555    /* should we return a data pointer? */
556    if ((data != NULL) && (*data == NULL))
557	*data = mc->mc_data;
558
559    /* command completed OK */
560    error = 0;
561
562out:
563    if (mc != NULL) {
564	/* do we need to free a data buffer we allocated? */
565	if (error && (mc->mc_data != NULL) && (*data == NULL))
566	    free(mc->mc_data, M_DEVBUF);
567	mly_release_command(mc);
568    }
569    return(error);
570}
571
572/********************************************************************************
573 * Fetch one event from the controller.
574 */
575static void
576mly_fetch_event(struct mly_softc *sc)
577{
578    struct mly_command		*mc;
579    struct mly_command_ioctl	*mci;
580    int				s;
581    u_int32_t			event;
582
583    debug_called(2);
584
585    /* get a command */
586    mc = NULL;
587    if (mly_alloc_command(sc, &mc))
588	return;				/* we'll get retried the next time a command completes */
589
590    /* set up the data buffer */
591    if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
592	mly_release_command(mc);
593	return;				/* we'll get retried the next time a command completes */
594    }
595    mc->mc_length = sizeof(struct mly_event);
596    mc->mc_flags |= MLY_CMD_DATAIN;
597    mc->mc_complete = mly_complete_event;
598
599    /*
600     * Get an event number to fetch.  It's possible that we've raced with another
601     * context for the last event, in which case there will be no more events.
602     */
603    s = splcam();
604    if (sc->mly_event_counter == sc->mly_event_waiting) {
605	mly_release_command(mc);
606	splx(s);
607	return;
608    }
609    event = sc->mly_event_counter++;
610    splx(s);
611
612    /*
613     * Build the ioctl.
614     *
615     * At this point we are committed to sending this request, as it
616     * will be the only one constructed for this particular event number.
617     */
618    mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
619    mci->opcode = MDACMD_IOCTL;
620    mci->data_size = sizeof(struct mly_event);
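    /*
     * The 32-bit event sequence number is scattered across the packet: bits
     * 0-15 go in the GETEVENT parameter, bits 16-23 in the LUN field and
     * bits 24-31 in the target field.
     */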
621    mci->addr.phys.lun = (event >> 16) & 0xff;
622    mci->addr.phys.target = (event >> 24) & 0xff;
623    mci->addr.phys.channel = 0;
624    mci->addr.phys.controller = 0;
625    mci->timeout.value = 30;
626    mci->timeout.scale = MLY_TIMEOUT_SECONDS;
627    mci->sub_ioctl = MDACIOCTL_GETEVENT;
628    mci->param.getevent.sequence_number_low = event & 0xffff;
629
630    debug(2, "fetch event %u", event);
631
632    /*
633     * Use the ready queue to get this command dispatched.
634     */
635    mly_enqueue_ready(mc);
636    mly_startio(sc);
637}
638
639/********************************************************************************
640 * Handle the completion of an event poll.
641 *
642 * Note that we don't actually have to instigate another poll; the completion of
643 * this command will trigger that if there are any more events to poll for.
644 */
645static void
646mly_complete_event(struct mly_command *mc)
647{
648    struct mly_softc	*sc = mc->mc_sc;
649    struct mly_event	*me = (struct mly_event *)mc->mc_data;
650
651    debug_called(2);
652
653    /*
654     * If the event was successfully fetched, process it.
655     */
656    if (mc->mc_status == SCSI_STATUS_OK) {
657	mly_process_event(sc, me);
658	free(me, M_DEVBUF);
659    }
660    mly_release_command(mc);
661}
662
663/********************************************************************************
664 * Process a controller event.
665 */
666static void
667mly_process_event(struct mly_softc *sc, struct mly_event *me)
668{
669    struct scsi_sense_data	*ssd = (struct scsi_sense_data *)&me->sense[0];
670    char			*fp, *tp;
671    int				bus, target, event, class, action;
672
673    /*
674     * Errors can be reported using vendor-unique sense data.  In this case, the
675     * event code will be 0x1c (Request sense data present), the sense key will
676     * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
677     * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
678     * and the low seven bits of the ASC (forming the high byte).
679     */
680    if ((me->code == 0x1c) &&
681	((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
682	(ssd->add_sense_code & 0x80)) {
683	event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
684    } else {
685	event = me->code;
686    }
687
688    /* look up event, get codes */
689    fp = mly_describe_code(mly_table_event, event);
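    /*
     * Each entry in mly_table_event encodes a class character, an action
     * character and then the message text.  An upper-case class marks a
     * 'noisy' event that is only reported when booting verbose.
     */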
690
691    debug(2, "Event %d  code 0x%x", me->sequence_number, me->code);
692
693    /* quiet event? */
694    class = fp[0];
695    if (isupper(class) && bootverbose)
696	class = tolower(class);
697
698    /* get action code, text string */
699    action = fp[1];
700    tp = &fp[2];
701
702    /*
703     * Print some information about the event.
704     *
705     * This code uses a table derived from the corresponding portion of the Linux
706     * driver, and thus the parser is very similar.
707     */
708    switch(class) {
709    case 'p':		/* error on physical device */
710	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
711	if (action == 'r')
712	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
713	break;
714    case 'l':		/* error on logical unit */
715    case 'm':		/* message about logical unit */
716	bus = MLY_LOGDEV_BUS(sc, me->lun);
717	target = MLY_LOGDEV_TARGET(me->lun);
718	mly_name_device(sc, bus, target);
719	mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
720	if (action == 'r')
721	    sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
722	break;
724    case 's':		/* report of sense data */
725	if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
726	    (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
727	     (ssd->add_sense_code == 0x04) &&
728	     ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
729	    break;	/* ignore NO_SENSE, and NOT_READY while becoming ready or awaiting initialisation */
730
731	mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
732	mly_printf(sc, "  sense key %d  asc %02x  ascq %02x\n",
733		      ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
734	mly_printf(sc, "  info %4D  csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
735	if (action == 'r')
736	    sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
737	break;
738    case 'e':
739	mly_printf(sc, tp, me->target, me->lun);
740	break;
741    case 'c':
742	mly_printf(sc, "controller %s\n", tp);
743	break;
744    case '?':
745	mly_printf(sc, "%s - %d\n", tp, me->code);
746	break;
747    default:	/* probably a 'noisy' event being ignored */
748	break;
749    }
750}
751
752/********************************************************************************
753 * Perform periodic activities.
754 */
755static void
756mly_periodic(void *data)
757{
758    struct mly_softc	*sc = (struct mly_softc *)data;
759    int			nchn, bus, target;
760
761    debug_called(2);
762
763    /*
764     * Scan devices.
765     */
766    nchn = sc->mly_controllerinfo->physical_channels_present +
767	sc->mly_controllerinfo->virtual_channels_present;
768    for (bus = 0; bus < nchn; bus++) {
769	for (target = 0; target < MLY_MAX_TARGETS; target++) {
770
771	    /* ignore the controller in this scan */
772	    if (target == sc->mly_controllerparam->initiator_id)
773		continue;
774
775	    /* perform device rescan? */
776	    if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
777		mly_rescan_btl(sc, bus, target);
778	}
779    }
780
781    sc->mly_periodic = timeout(mly_periodic, sc, hz);
782}
783
784/********************************************************************************
785 ********************************************************************************
786                                                               Command Processing
787 ********************************************************************************
788 ********************************************************************************/
789
790/********************************************************************************
791 * Run a command and wait for it to complete.
792 *
793 */
794static int
795mly_immediate_command(struct mly_command *mc)
796{
797    struct mly_softc	*sc = mc->mc_sc;
798    int			error, s;
799
800    debug_called(2);
801
802    /* spinning at splcam is ugly, but we're only used during controller init */
803    s = splcam();
804    if ((error = mly_start(mc))) {
	splx(s);
805	return(error);
    }
806
807    if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
808	/* sleep on the command */
809	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE) {
810	    tsleep(mc, PRIBIO, "mlywait", 0);
811	}
812    } else {
813	/* spin and collect status while we do */
814	while(MLY_CMD_STATE(mc) != MLY_CMD_COMPLETE)
815	    mly_done(mc->mc_sc);
816    }
817    splx(s);
818    return(0);
819}
820
821/********************************************************************************
822 * Start as much queued I/O as possible on the controller
823 */
824void
825mly_startio(struct mly_softc *sc)
826{
827    struct mly_command	*mc;
828
829    debug_called(2);
830
831    for (;;) {
832
833	/* try for a ready command */
834	mc = mly_dequeue_ready(sc);
835
836	/* try to build a command from a queued ccb */
837	if (!mc)
838	    mly_cam_command(sc, &mc);
839
840	/* no command == nothing to do */
841	if (!mc)
842	    break;
843
844	/* try to post the command */
845	if (mly_start(mc)) {
846	    /* controller busy, or no resources - defer for later */
847	    mly_requeue_ready(mc);
848	    break;
849	}
850    }
851}
852
853/********************************************************************************
854 * Deliver a command to the controller; allocate controller resources at the
855 * last moment.
856 */
857static int
858mly_start(struct mly_command *mc)
859{
860    struct mly_softc		*sc = mc->mc_sc;
861    union mly_command_packet	*pkt;
862    int				s;
863
864    debug_called(2);
865
866    /*
867     * Set the command up for delivery to the controller.  This may fail
868     * due to resource shortages.
869     */
870    if (mly_get_slot(mc))
871	return(EBUSY);
872    mly_map_command(mc);
873
874    s = splcam();
875    /*
876     * Do we have to use the hardware mailbox?
877     */
878    if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
879	/*
880	 * Check to see if the controller is ready for us.
881	 */
882	if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
883	    splx(s);
884	    return(EBUSY);
885	}
886
887	/*
888	 * It's ready, send the command.
889	 */
890	MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
891	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
892
893    } else {	/* use memory-mailbox mode */
894
895	pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
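	/*
	 * The memory mailbox is a ring of MLY_MMBOX_COMMANDS packet slots; a
	 * nonzero flag byte means the controller has not yet consumed the
	 * slot.  The flag byte is written last so that the controller never
	 * sees a partially-copied packet.
	 */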
896
897	/* check to see if the next slot is free yet */
898	if (pkt->mmbox.flag != 0) {
899	    splx(s);
900	    return(EBUSY);
901	}
902
903	/* copy in new command */
904	bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
905	/* barrier to ensure completion of previous write before we write the flag */
906	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle? */
907	/* copy flag last */
908	pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
909	/* barrier to ensure completion of previous write before we notify the controller */
910	bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE);	/* tag/handle */
911
912	/* signal controller, update index */
913	MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
914	sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
915    }
916
917    splx(s);
918    return(0);
919}
920
921/********************************************************************************
922 * Pick up command status from the controller, schedule a completion event
923 */
924void
925mly_done(struct mly_softc *sc)
926{
927    struct mly_command		*mc;
928    union mly_status_packet	*sp;
929    u_int16_t			slot;
930    int				s, worked;
931
932    s = splcam();
933    worked = 0;
934
935    /* pick up hardware-mailbox commands */
936    if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
937	slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
938	if (slot < MLY_SLOT_MAX) {
939	    mc = sc->mly_busycmds[slot];
940	    if (mc != NULL) {
941		mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
942		mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
943		mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
944		mly_enqueue_completed(mc);
945		sc->mly_busycmds[slot] = NULL;
946		worked = 1;
947	    } else {
948		mly_printf(sc, "got HM completion for nonbusy slot %u\n", slot);
949	    }
950	} else {
951	    /* slot 0xffff may mean "extremely bogus command" */
952	    mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
953	}
954	/* unconditionally acknowledge status */
955	MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
956	MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
957    }
958
959    /* pick up memory-mailbox commands */
960    if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
961	for (;;) {
962	    sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
963
964	    /* check for more status */
965	    if (sp->mmbox.flag == 0)
966		break;
967
968	    /* get slot number */
969	    slot = sp->status.command_id;
970	    if (slot < MLY_SLOT_MAX) {
971		mc = sc->mly_busycmds[slot];
972		if (mc != NULL) {
973		    mc->mc_status = sp->status.status;
974		    mc->mc_sense = sp->status.sense_length;
975		    mc->mc_resid = sp->status.residue;
976		    mly_enqueue_completed(mc);
977		    sc->mly_busycmds[slot] = NULL;
978		    worked = 1;
979		} else {
980		    mly_printf(sc, "got AM completion for nonbusy slot %u\n", slot);
981		}
982	    } else {
983		/* slot 0xffff may mean "extremely bogus command" */
984		mly_printf(sc, "got AM completion for illegal slot %u at %d\n", slot, sc->mly_mmbox_status_index);
985	    }
986
987	    /* clear and move to next slot */
988	    sp->mmbox.flag = 0;
989	    sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
990	}
991	/* acknowledge that we have collected status value(s) */
992	MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
993    }
994
995    splx(s);
996    if (worked) {
997#if __FreeBSD_version >= 500005
998	if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
999	    taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
1000	else
1001#endif
1002	    mly_complete(sc, 0);
1003    }
1004}
1005
1006/********************************************************************************
1007 * Process completed commands
1008 */
1009static void
1010mly_complete(void *context, int pending)
1011{
1012    struct mly_softc	*sc = (struct mly_softc *)context;
1013    struct mly_command	*mc;
1014    void	        (* mc_complete)(struct mly_command *mc);
1015
1016
1017    debug_called(2);
1018
1019    /*
1020     * Spin pulling commands off the completed queue and processing them.
1021     */
1022    while ((mc = mly_dequeue_completed(sc)) != NULL) {
1023
1024	/*
1025	 * Free controller resources, mark command complete.
1026	 *
1027	 * Note that as soon as we mark the command complete, it may be freed
1028	 * out from under us, so we need to save the mc_complete field in
1029	 * order to later avoid dereferencing mc.  (We would not expect to
1030	 * have a polling/sleeping consumer with mc_complete != NULL).
1031	 */
1032	mly_unmap_command(mc);
1033	mc_complete = mc->mc_complete;
1034	MLY_CMD_SETSTATE(mc, MLY_CMD_COMPLETE);
1035
1036	/*
1037	 * Call completion handler or wake up sleeping consumer.
1038	 */
1039	if (mc_complete != NULL) {
1040	    mc_complete(mc);
1041	} else {
1042	    wakeup(mc);
1043	}
1044    }
1045
1046    /*
1047     * We may have freed up controller resources which would allow us
1048     * to push more commands onto the controller, so we check here.
1049     */
1050    mly_startio(sc);
1051
1052    /*
1053     * The controller may have updated the health status information,
1054     * so check for it here.
1055     *
1056     * Note that we only check for health status after a completed command.  It
1057     * might be wise to ping the controller occasionally if it's been idle for
1058     * a while just to check up on it.  While a filesystem is mounted or I/O is
1059     * otherwise active, this isn't really an issue.
1060     */
1061    if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1062	sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1063	debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1064	      sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1065	sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1066    }
1067    if (sc->mly_event_counter != sc->mly_event_waiting)
1068	mly_fetch_event(sc);
1069}
1070
1071/********************************************************************************
1072 ********************************************************************************
1073                                                        Command Buffer Management
1074 ********************************************************************************
1075 ********************************************************************************/
1076
1077/********************************************************************************
1078 * Give a command a slot in our lookup table, so that we can recover it when
1079 * the controller returns the slot number.
1080 *
1081 * Slots are freed in mly_done().
1082 */
1083static int
1084mly_get_slot(struct mly_command *mc)
1085{
1086    struct mly_softc	*sc = mc->mc_sc;
1087    u_int16_t		slot;
1088    int			tries;
1089
1090    debug_called(3);
1091
1092    if (mc->mc_flags & MLY_CMD_SLOTTED)
1093	return(0);
1094
1095    /*
1096     * Optimisation for the controller-busy case - check to see whether
1097     * we are already over the limit and stop immediately.
1098     */
1099    if (sc->mly_busy_count >= sc->mly_max_commands)
1100	return(EBUSY);
1101
1102    /*
1103     * Scan forward from the last slot that we assigned looking for a free
1104     * slot.  Don't scan more than the maximum number of commands that we
1105     * support (we should never reach the limit here due to the optimisation
1106     * above)
1107     */
1108    slot = sc->mly_last_slot;
1109    for (tries = sc->mly_max_commands; tries > 0; tries--) {
1110	if (sc->mly_busycmds[slot] == NULL) {
1111	    sc->mly_busycmds[slot] = mc;
1112	    mc->mc_slot = slot;
1113	    mc->mc_packet->generic.command_id = slot;
1114	    mc->mc_flags |= MLY_CMD_SLOTTED;
1115	    sc->mly_last_slot = slot;
1116	    return(0);
1117	}
1118	slot++;
1119	if (slot >= MLY_SLOT_MAX)
1120	    slot = MLY_SLOT_START;
1121    }
1122    return(EBUSY);
1123}
1124
1125/********************************************************************************
1126 * Allocate a command.
1127 */
1128int
1129mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
1130{
1131    struct mly_command	*mc;
1132
1133    debug_called(3);
1134
1135    if ((mc = mly_dequeue_free(sc)) == NULL) {
1136	mly_alloc_command_cluster(sc);
1137	mc = mly_dequeue_free(sc);
1138    }
1139    if (mc != NULL)
1140	TAILQ_REMOVE(&sc->mly_freecmds, mc, mc_link);
1141
1142    if (mc == NULL)
1143	return(ENOMEM);
1144
1145    MLY_CMD_SETSTATE(mc, MLY_CMD_SETUP);
1146    *mcp = mc;
1147    return(0);
1148}
1149
1150/********************************************************************************
1151 * Release a command back to the freelist.
1152 */
1153void
1154mly_release_command(struct mly_command *mc)
1155{
1156    debug_called(3);
1157
1158    /*
1159     * Fill in parts of the command that may cause confusion if the next
1160     * consumer doesn't initialise them when the command is allocated again.
1161     */
1162    MLY_CMD_SETSTATE(mc, MLY_CMD_FREE);
1163    mc->mc_data = NULL;
1164    mc->mc_flags = 0;
1165    mc->mc_complete = NULL;
1166    mc->mc_private = NULL;
1167
1168    /*
1169     * By default, we set up to overwrite the command packet with
1170     * sense information.
1171     */
1172    mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
1173    mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
1174
1175    mly_enqueue_free(mc);
1176}
1177
1178/********************************************************************************
1179 * Map helper for command cluster allocation.
1180 *
1181 * Note that there are never more command packets in a cluster than will fit in
1182 * a page, so there is no need to look at anything other than the base of the
1183 * allocation (which will be page-aligned).
1184 */
1185static void
1186mly_alloc_command_cluster_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1187{
1188    struct mly_command_cluster	*mcc = (struct mly_command_cluster *)arg;
1189
1190    debug_called(2);
1191
1192    mcc->mcc_packetphys = segs[0].ds_addr;
1193}
1194
1195/********************************************************************************
1196 * Allocate and initialise a cluster of commands.
1197 */
1198static void
1199mly_alloc_command_cluster(struct mly_softc *sc)
1200{
1201    struct mly_command_cluster	*mcc;
1202    struct mly_command		*mc;
1203    int				i;
1204
1205    debug_called(1);
1206
1207    mcc = malloc(sizeof(struct mly_command_cluster), M_DEVBUF, M_NOWAIT);
1208    if (mcc != NULL) {
1209
1210	/*
1211	 * Allocate enough space for all the command packets for this cluster and
1212	 * map them permanently into controller-visible space.
1213	 */
1214	if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&mcc->mcc_packet,
1215			     BUS_DMA_NOWAIT, &mcc->mcc_packetmap)) {
1216	    free(mcc, M_DEVBUF);
1217	    return;
1218	}
1219	bus_dmamap_load(sc->mly_packet_dmat, mcc->mcc_packetmap, mcc->mcc_packet,
1220			MLY_CMD_CLUSTERCOUNT * sizeof(union mly_command_packet),
1221			mly_alloc_command_cluster_map, mcc, 0);
1222
1223	mly_enqueue_cluster(sc, mcc);
1224	for (i = 0; i < MLY_CMD_CLUSTERCOUNT; i++) {
1225	    mc = &mcc->mcc_command[i];
1226	    bzero(mc, sizeof(*mc));
1227	    mc->mc_sc = sc;
1228	    mc->mc_packet = mcc->mcc_packet + i;
1229	    mc->mc_packetphys = mcc->mcc_packetphys + (i * sizeof(union mly_command_packet));
1230	    if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1231		mly_release_command(mc);
1232	}
1233    }
1234}
1235
1236/********************************************************************************
1237 * Command-mapping helper function - populate this command slot's s/g table
1238 * with the s/g entries for this command.
1239 */
1240static void
1241mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1242{
1243    struct mly_command		*mc = (struct mly_command *)arg;
1244    struct mly_softc		*sc = mc->mc_sc;
1245    struct mly_command_generic	*gen = &(mc->mc_packet->generic);
1246    struct mly_sg_entry		*sg;
1247    int				i, tabofs;
1248
1249    debug_called(3);
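    /*
     * The command packet can hold at most two s/g entries inline; larger
     * transfers use this command slot's region of the preallocated s/g
     * table, referenced through the 'indirect' transfer format.
     */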
1250
1251    /* can we use the transfer structure directly? */
1252    if (nseg <= 2) {
1253	sg = &gen->transfer.direct.sg[0];
1254	gen->command_control.extended_sg_table = 0;
1255    } else {
1256	tabofs = (mc->mc_slot * MLY_MAXSGENTRIES);
1257	sg = sc->mly_sg_table + tabofs;
1258	gen->transfer.indirect.entries[0] = nseg;
1259	gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1260	gen->command_control.extended_sg_table = 1;
1261    }
1262
1263    /* copy the s/g table */
1264    for (i = 0; i < nseg; i++) {
1265	sg[i].physaddr = segs[i].ds_addr;
1266	sg[i].length = segs[i].ds_len;
1267    }
1268
1269}
1270
1271#if 0
1272/********************************************************************************
1273 * Command-mapping helper function - save the cdb's physical address.
1274 *
1275 * We don't support 'large' SCSI commands at this time, so this is unused.
1276 */
1277static void
1278mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1279{
1280    struct mly_command			*mc = (struct mly_command *)arg;
1281
1282    debug_called(3);
1283
1284    /* XXX can we safely assume that a CDB will never cross a page boundary? */
1285    if ((segs[0].ds_addr % PAGE_SIZE) >
1286	((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1287	panic("cdb crosses page boundary");
1288
1289    /* fix up fields in the command packet */
1290    mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
1291}
1292#endif
1293
1294/********************************************************************************
1295 * Map a command into controller-visible space
1296 */
1297static void
1298mly_map_command(struct mly_command *mc)
1299{
1300    struct mly_softc	*sc = mc->mc_sc;
1301
1302    debug_called(2);
1303
1304    /* don't map more than once */
1305    if (mc->mc_flags & MLY_CMD_MAPPED)
1306	return;
1307
1308    /* does the command have a data buffer? */
1309    if (mc->mc_data != NULL)
1310	bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1311			mly_map_command_sg, mc, 0);
1312
1313    if (mc->mc_flags & MLY_CMD_DATAIN)
1314	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1315    if (mc->mc_flags & MLY_CMD_DATAOUT)
1316	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1317
1318    mc->mc_flags |= MLY_CMD_MAPPED;
1319}
1320
1321/********************************************************************************
1322 * Unmap a command from controller-visible space
1323 */
1324static void
1325mly_unmap_command(struct mly_command *mc)
1326{
1327    struct mly_softc	*sc = mc->mc_sc;
1328
1329    debug_called(2);
1330
1331    if (!(mc->mc_flags & MLY_CMD_MAPPED))
1332	return;
1333
1334    if (mc->mc_flags & MLY_CMD_DATAIN)
1335	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1336    if (mc->mc_flags & MLY_CMD_DATAOUT)
1337	bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1338
1339    /* does the command have a data buffer? */
1340    if (mc->mc_data != NULL)
1341	bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1342
1343    mc->mc_flags &= ~MLY_CMD_MAPPED;
1344}
1345
1346/********************************************************************************
1347 ********************************************************************************
1348                                                                 Hardware Control
1349 ********************************************************************************
1350 ********************************************************************************/
1351
1352/********************************************************************************
1353 * Handshake with the firmware while the card is being initialised.
1354 */
1355static int
1356mly_fwhandshake(struct mly_softc *sc)
1357{
1358    u_int8_t	error, param0, param1;
1359    int		spinup = 0;
1360
1361    debug_called(1);
1362
1363    /* set HM_STSACK and let the firmware initialise */
1364    MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
1365    DELAY(1000);	/* too short? */
1366
1367    /* if HM_STSACK is still true, the controller is initialising */
1368    if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
1369	return(0);
1370    mly_printf(sc, "controller initialisation started\n");
1371
1372    /* spin waiting for initialisation to finish, or for a message to be delivered */
1373    while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
1374	/* check for a message */
1375	if (MLY_ERROR_VALID(sc)) {
1376	    error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
1377	    param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
1378	    param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
1379
1380	    switch(error) {
1381	    case MLY_MSG_SPINUP:
1382		if (!spinup) {
1383		    mly_printf(sc, "drive spinup in progress\n");
1384		    spinup = 1;			/* only print this once (should print drive being spun?) */
1385		}
1386		break;
1387	    case MLY_MSG_RACE_RECOVERY_FAIL:
1388		mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
1389		break;
1390	    case MLY_MSG_RACE_IN_PROGRESS:
1391		mly_printf(sc, "mirror race recovery in progress\n");
1392		break;
1393	    case MLY_MSG_RACE_ON_CRITICAL:
1394		mly_printf(sc, "mirror race recovery on a critical drive\n");
1395		break;
1396	    case MLY_MSG_PARITY_ERROR:
1397		mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
1398		return(ENXIO);
1399	    default:
1400		mly_printf(sc, "unknown initialisation code 0x%x\n", error);
1401	    }
1402	}
1403    }
1404    return(0);
1405}
1406
1407/********************************************************************************
1408 ********************************************************************************
1409                                                        Debugging and Diagnostics
1410 ********************************************************************************
1411 ********************************************************************************/
1412
1413/********************************************************************************
1414 * Print some information about the controller.
1415 */
1416static void
1417mly_describe_controller(struct mly_softc *sc)
1418{
1419    struct mly_ioctl_getcontrollerinfo	*mi = sc->mly_controllerinfo;
1420
1421    mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
1422	       mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
1423	       mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,	/* XXX turn encoding? */
1424	       mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
1425	       mi->memory_size);
1426
1427    if (bootverbose) {
1428	mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
1429		   mly_describe_code(mly_table_oemname, mi->oem_information),
1430		   mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
1431		   mi->interface_speed, mi->interface_width, mi->interface_name);
1432	mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
1433		   mi->memory_size, mi->memory_speed, mi->memory_width,
1434		   mly_describe_code(mly_table_memorytype, mi->memory_type),
1435		   mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
1436		   mi->cache_size);
1437	mly_printf(sc, "CPU: %s @ %dMHZ\n",
1438		   mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
1439	if (mi->l2cache_size != 0)
1440	    mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
1441	if (mi->exmemory_size != 0)
1442	    mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
1443		       mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
1444		       mly_describe_code(mly_table_memorytype, mi->exmemory_type),
1445		       mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
1446	mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
1447	mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
1448		   mi->maximum_block_count, mi->maximum_sg_entries);
1449	mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
1450		   mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
1451	mly_printf(sc, "physical devices present %d\n",
1452		   mi->physical_devices_present);
1453	mly_printf(sc, "physical disks present/offline %d/%d\n",
1454		   mi->physical_disks_present, mi->physical_disks_offline);
1455	mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
1456		   mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
1457		   mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
1458		   mi->virtual_channels_possible);
1459	mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
1460	mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
1461		   mi->flash_size, mi->flash_age, mi->flash_maximum_age);
1462    }
1463}
1464
1465#ifdef MLY_DEBUG
1466/********************************************************************************
1467 * Print some controller state
1468 */
1469static void
1470mly_printstate(struct mly_softc *sc)
1471{
1472    mly_printf(sc, "IDBR %02x  ODBR %02x  ERROR %02x  (%x %x %x)\n",
1473		  MLY_GET_REG(sc, sc->mly_idbr),
1474		  MLY_GET_REG(sc, sc->mly_odbr),
1475		  MLY_GET_REG(sc, sc->mly_error_status),
1476		  sc->mly_idbr,
1477		  sc->mly_odbr,
1478		  sc->mly_error_status);
1479    mly_printf(sc, "IMASK %02x  ISTATUS %02x\n",
1480		  MLY_GET_REG(sc, sc->mly_interrupt_mask),
1481		  MLY_GET_REG(sc, sc->mly_interrupt_status));
1482    mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
1483		  MLY_GET_REG(sc, sc->mly_command_mailbox),
1484		  MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
1485		  MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
1486		  MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
1487		  MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
1488		  MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
1489		  MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
1490		  MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
1491    mly_printf(sc, "STATUS  %02x %02x %02x %02x %02x %02x %02x %02x\n",
1492		  MLY_GET_REG(sc, sc->mly_status_mailbox),
1493		  MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
1494		  MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
1495		  MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
1496		  MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
1497		  MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
1498		  MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
1499		  MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
1500    mly_printf(sc, "        %04x        %08x\n",
1501		  MLY_GET_REG2(sc, sc->mly_status_mailbox),
1502		  MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
1503}
1504
1505struct mly_softc	*mly_softc0 = NULL;
1506void
1507mly_printstate0(void)
1508{
1509    if (mly_softc0 != NULL)
1510	mly_printstate(mly_softc0);
1511}
1512
1513/********************************************************************************
1514 * Print a command
1515 */
1516static void
1517mly_print_command(struct mly_command *mc)
1518{
1519    struct mly_softc	*sc = mc->mc_sc;
1520
1521    mly_printf(sc, "COMMAND @ %p\n", mc);
1522    mly_printf(sc, "  slot      %d\n", mc->mc_slot);
1523    mly_printf(sc, "  state     %d\n", MLY_CMD_STATE(mc));
1524    mly_printf(sc, "  status    0x%x\n", mc->mc_status);
1525    mly_printf(sc, "  sense len %d\n", mc->mc_sense);
1526    mly_printf(sc, "  resid     %d\n", mc->mc_resid);
1527    mly_printf(sc, "  packet    %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
1528    if (mc->mc_packet != NULL)
1529	mly_print_packet(mc);
1530    mly_printf(sc, "  data      %p/%d\n", mc->mc_data, mc->mc_length);
1531    mly_printf(sc, "  flags     %b\n", mc->mc_flags, "\20\11slotted\12mapped\13priority\14datain\15dataout\n");
1532    mly_printf(sc, "  complete  %p\n", mc->mc_complete);
1533    mly_printf(sc, "  private   %p\n", mc->mc_private);
1534}
1535
1536/********************************************************************************
1537 * Print a command packet
1538 */
1539static void
1540mly_print_packet(struct mly_command *mc)
1541{
1542    struct mly_softc			*sc = mc->mc_sc;
1543    struct mly_command_generic		*ge = (struct mly_command_generic *)mc->mc_packet;
1544    struct mly_command_scsi_small	*ss = (struct mly_command_scsi_small *)mc->mc_packet;
1545    struct mly_command_scsi_large	*sl = (struct mly_command_scsi_large *)mc->mc_packet;
1546    struct mly_command_ioctl		*io = (struct mly_command_ioctl *)mc->mc_packet;
1547    int					transfer;
1548
1549    mly_printf(sc, "   command_id           %d\n", ge->command_id);
1550    mly_printf(sc, "   opcode               %d\n", ge->opcode);
1551    mly_printf(sc, "   command_control      fua %d  dpo %d  est %d  dd %s  nas %d ddis %d\n",
1552		  ge->command_control.force_unit_access,
1553		  ge->command_control.disable_page_out,
1554		  ge->command_control.extended_sg_table,
1555		  (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
1556		  ge->command_control.no_auto_sense,
1557		  ge->command_control.disable_disconnect);
1558    mly_printf(sc, "   data_size            %d\n", ge->data_size);
1559    mly_printf(sc, "   sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
1560    mly_printf(sc, "   lun                  %d\n", ge->addr.phys.lun);
1561    mly_printf(sc, "   target               %d\n", ge->addr.phys.target);
1562    mly_printf(sc, "   channel              %d\n", ge->addr.phys.channel);
1563    mly_printf(sc, "   logical device       %d\n", ge->addr.log.logdev);
1564    mly_printf(sc, "   controller           %d\n", ge->addr.phys.controller);
1565    mly_printf(sc, "   timeout              %d %s\n",
1566		  ge->timeout.value,
1567		  (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
1568		  ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
1569    mly_printf(sc, "   maximum_sense_size   %d\n", ge->maximum_sense_size);
1570    switch(ge->opcode) {
1571    case MDACMD_SCSIPT:
1572    case MDACMD_SCSI:
1573	mly_printf(sc, "   cdb length           %d\n", ss->cdb_length);
1574	mly_printf(sc, "   cdb                  %*D\n", ss->cdb_length, ss->cdb, " ");
1575	transfer = 1;
1576	break;
1577    case MDACMD_SCSILC:
1578    case MDACMD_SCSILCPT:
1579	mly_printf(sc, "   cdb length           %d\n", sl->cdb_length);
1580	mly_printf(sc, "   cdb                  0x%llx\n", sl->cdb_physaddr);
1581	transfer = 1;
1582	break;
1583    case MDACMD_IOCTL:
1584	mly_printf(sc, "   sub_ioctl            0x%x\n", io->sub_ioctl);
1585	switch(io->sub_ioctl) {
1586	case MDACIOCTL_SETMEMORYMAILBOX:
1587	    mly_printf(sc, "   health_buffer_size   %d\n",
1588			  io->param.setmemorymailbox.health_buffer_size);
1589	    mly_printf(sc, "   health_buffer_phys   0x%llx\n",
1590			  io->param.setmemorymailbox.health_buffer_physaddr);
1591	    mly_printf(sc, "   command_mailbox      0x%llx\n",
1592			  io->param.setmemorymailbox.command_mailbox_physaddr);
1593	    mly_printf(sc, "   status_mailbox       0x%llx\n",
1594			  io->param.setmemorymailbox.status_mailbox_physaddr);
1595	    transfer = 0;
1596	    break;
1597
1598	case MDACIOCTL_SETREALTIMECLOCK:
1599	case MDACIOCTL_GETHEALTHSTATUS:
1600	case MDACIOCTL_GETCONTROLLERINFO:
1601	case MDACIOCTL_GETLOGDEVINFOVALID:
1602	case MDACIOCTL_GETPHYSDEVINFOVALID:
1603	case MDACIOCTL_GETPHYSDEVSTATISTICS:
1604	case MDACIOCTL_GETLOGDEVSTATISTICS:
1605	case MDACIOCTL_GETCONTROLLERSTATISTICS:
1606	case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
1607	case MDACIOCTL_CREATENEWCONF:
1608	case MDACIOCTL_ADDNEWCONF:
1609	case MDACIOCTL_GETDEVCONFINFO:
1610	case MDACIOCTL_GETFREESPACELIST:
1611	case MDACIOCTL_MORE:
1612	case MDACIOCTL_SETPHYSDEVPARAMETER:
1613	case MDACIOCTL_GETPHYSDEVPARAMETER:
1614	case MDACIOCTL_GETLOGDEVPARAMETER:
1615	case MDACIOCTL_SETLOGDEVPARAMETER:
1616	    mly_printf(sc, "   param                %10D\n", io->param.data.param, " ");
1617	    transfer = 1;
1618	    break;
1619
1620	case MDACIOCTL_GETEVENT:
1621	    mly_printf(sc, "   event                %d\n",
1622		       io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
1623	    transfer = 1;
1624	    break;
1625
1626	case MDACIOCTL_SETRAIDDEVSTATE:
1627	    mly_printf(sc, "   state                %d\n", io->param.setraiddevstate.state);
1628	    transfer = 0;
1629	    break;
1630
1631	case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
1632	    mly_printf(sc, "   raid_device          %d\n", io->param.xlatephysdevtoraiddev.raid_device);
1633	    mly_printf(sc, "   controller           %d\n", io->param.xlatephysdevtoraiddev.controller);
1634	    mly_printf(sc, "   channel              %d\n", io->param.xlatephysdevtoraiddev.channel);
1635	    mly_printf(sc, "   target               %d\n", io->param.xlatephysdevtoraiddev.target);
1636	    mly_printf(sc, "   lun                  %d\n", io->param.xlatephysdevtoraiddev.lun);
1637	    transfer = 0;
1638	    break;
1639
1640	case MDACIOCTL_GETGROUPCONFINFO:
1641	    mly_printf(sc, "   group                %d\n", io->param.getgroupconfinfo.group);
1642	    transfer = 1;
1643	    break;
1644
1645	case MDACIOCTL_GET_SUBSYSTEM_DATA:
1646	case MDACIOCTL_SET_SUBSYSTEM_DATA:
1647	case MDACIOCTL_STARTDISOCVERY:
1648	case MDACIOCTL_INITPHYSDEVSTART:
1649	case MDACIOCTL_INITPHYSDEVSTOP:
1650	case MDACIOCTL_INITRAIDDEVSTART:
1651	case MDACIOCTL_INITRAIDDEVSTOP:
1652	case MDACIOCTL_REBUILDRAIDDEVSTART:
1653	case MDACIOCTL_REBUILDRAIDDEVSTOP:
1654	case MDACIOCTL_MAKECONSISTENTDATASTART:
1655	case MDACIOCTL_MAKECONSISTENTDATASTOP:
1656	case MDACIOCTL_CONSISTENCYCHECKSTART:
1657	case MDACIOCTL_CONSISTENCYCHECKSTOP:
1658	case MDACIOCTL_RESETDEVICE:
1659	case MDACIOCTL_FLUSHDEVICEDATA:
1660	case MDACIOCTL_PAUSEDEVICE:
1661	case MDACIOCTL_UNPAUSEDEVICE:
1662	case MDACIOCTL_LOCATEDEVICE:
1663	case MDACIOCTL_SETMASTERSLAVEMODE:
1664	case MDACIOCTL_DELETERAIDDEV:
1665	case MDACIOCTL_REPLACEINTERNALDEV:
1666	case MDACIOCTL_CLEARCONF:
1667	case MDACIOCTL_GETCONTROLLERPARAMETER:
1668	case MDACIOCTL_SETCONTRLLERPARAMETER:
1669	case MDACIOCTL_CLEARCONFSUSPMODE:
1670	case MDACIOCTL_STOREIMAGE:
1671	case MDACIOCTL_READIMAGE:
1672	case MDACIOCTL_FLASHIMAGES:
1673	case MDACIOCTL_RENAMERAIDDEV:
1674	default:			/* no idea what to print */
1675	    transfer = 0;
1676	    break;
1677	}
1678	break;
1679
1680    case MDACMD_IOCTLCHECK:
1681    case MDACMD_MEMCOPY:
1682    default:
1683	transfer = 0;
1684	break;	/* print nothing */
1685    }
1686    if (transfer) {
1687	if (ge->command_control.extended_sg_table) {
1688	    mly_printf(sc, "   sg table             0x%llx/%d\n",
1689			  ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
1690	} else {
1691	    mly_printf(sc, "   0000                 0x%llx/%lld\n",
1692			  ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
1693	    mly_printf(sc, "   0001                 0x%llx/%lld\n",
1694			  ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
1695	}
1696    }
1697}
1698
1699/********************************************************************************
1700 * Panic in a slightly informative fashion
1701 */
1702static void
1703mly_panic(struct mly_softc *sc, char *reason)
1704{
1705    mly_printstate(sc);
1706    panic(reason);
1707}
1708#endif
1709