1/*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 *    notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 *    notice, this list of conditions and the following disclaimer in the
40 *    documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 *    agrees to the disclaimer below and the terms and conditions set forth
43 *    herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD: head/sys/dev/amr/amr.c 158267 2006-05-03 16:45:15Z ambrisko $");
60
61/*
62 * Driver for the AMI MegaRaid family of controllers.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/kernel.h>
69#include <sys/proc.h>
70#include <sys/sysctl.h>
71
72#include <sys/bio.h>
73#include <sys/bus.h>
74#include <sys/conf.h>
75#include <sys/stat.h>
76
77#include <machine/bus.h>
78#include <machine/cpu.h>
79#include <machine/resource.h>
80#include <sys/rman.h>
81
82#include <dev/pci/pcireg.h>
83#include <dev/pci/pcivar.h>
84
85#include <dev/amr/amrio.h>
86#include <dev/amr/amrreg.h>
87#include <dev/amr/amrvar.h>
88#define AMR_DEFINE_TABLES
89#include <dev/amr/amr_tables.h>
90
91/*
92 * The CAM interface appears to be completely broken.  Disable it.
93 */
94#ifndef AMR_ENABLE_CAM
95#define AMR_ENABLE_CAM 0
96#endif
97
98SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
99
100static d_open_t         amr_open;
101static d_close_t        amr_close;
102static d_ioctl_t        amr_ioctl;
103
104static struct cdevsw amr_cdevsw = {
105	.d_version =	D_VERSION,
106	.d_flags =	D_NEEDGIANT,
107	.d_open =	amr_open,
108	.d_close =	amr_close,
109	.d_ioctl =	amr_ioctl,
110	.d_name =	"amr",
111};
112
113int linux_no_adapter = 0;
114/*
115 * Initialisation, bus interface.
116 */
117static void	amr_startup(void *arg);
118
119/*
120 * Command wrappers
121 */
122static int	amr_query_controller(struct amr_softc *sc);
123static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
124			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
125static void	amr_completeio(struct amr_command *ac);
126static int	amr_support_ext_cdb(struct amr_softc *sc);
127
128/*
129 * Command buffer allocation.
130 */
131static void	amr_alloccmd_cluster(struct amr_softc *sc);
132static void	amr_freecmd_cluster(struct amr_command_cluster *acc);
133
134/*
135 * Command processing.
136 */
137static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
138static int	amr_wait_command(struct amr_command *ac) __unused;
139static int	amr_mapcmd(struct amr_command *ac);
140static void	amr_unmapcmd(struct amr_command *ac);
141static int	amr_start(struct amr_command *ac);
142static void	amr_complete(void *context, int pending);
143static void	amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144static void	amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145static void	amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
146
147/*
148 * Status monitoring
149 */
150static void	amr_periodic(void *data);
151
152/*
153 * Interface-specific shims
154 */
155static int	amr_quartz_submit_command(struct amr_command *ac);
156static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
157static int	amr_quartz_poll_command(struct amr_command *ac);
158static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
159
160static int	amr_std_submit_command(struct amr_command *ac);
161static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
162static int	amr_std_poll_command(struct amr_command *ac);
163static void	amr_std_attach_mailbox(struct amr_softc *sc);
164
165#ifdef AMR_BOARD_INIT
166static int	amr_quartz_init(struct amr_softc *sc);
167static int	amr_std_init(struct amr_softc *sc);
168#endif
169
170/*
171 * Debugging
172 */
173static void	amr_describe_controller(struct amr_softc *sc);
174#ifdef AMR_DEBUG
175#if 0
176static void	amr_printcommand(struct amr_command *ac);
177#endif
178#endif
179
180static void	amr_init_sysctl(struct amr_softc *sc);
181static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
182		    int32_t flag, d_thread_t *td);
183
184/********************************************************************************
185 ********************************************************************************
186                                                                      Inline Glue
187 ********************************************************************************
188 ********************************************************************************/
189
190/********************************************************************************
191 ********************************************************************************
192                                                                Public Interfaces
193 ********************************************************************************
194 ********************************************************************************/
195
196/********************************************************************************
197 * Initialise the controller and softc.
198 */
199int
200amr_attach(struct amr_softc *sc)
201{
202
203    debug_called(1);
204
205    /*
206     * Initialise per-controller queues.
207     */
208    TAILQ_INIT(&sc->amr_completed);
209    TAILQ_INIT(&sc->amr_freecmds);
210    TAILQ_INIT(&sc->amr_cmd_clusters);
211    TAILQ_INIT(&sc->amr_ready);
212    bioq_init(&sc->amr_bioq);
213
214    debug(2, "queue init done");
215
216    /*
217     * Configure for this controller type.
218     */
219    if (AMR_IS_QUARTZ(sc)) {
220	sc->amr_submit_command = amr_quartz_submit_command;
221	sc->amr_get_work       = amr_quartz_get_work;
222	sc->amr_poll_command   = amr_quartz_poll_command;
223	sc->amr_poll_command1  = amr_quartz_poll_command1;
224    } else {
225	sc->amr_submit_command = amr_std_submit_command;
226	sc->amr_get_work       = amr_std_get_work;
227	sc->amr_poll_command   = amr_std_poll_command;
	amr_std_attach_mailbox(sc);
229    }
230
231#ifdef AMR_BOARD_INIT
    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
233	return(ENXIO);
234#endif
235
236    /*
237     * Quiz controller for features and limits.
238     */
239    if (amr_query_controller(sc))
240	return(ENXIO);
241
242    debug(2, "controller query complete");
243
244    /*
245     * Setup sysctls.
246     */
247    amr_init_sysctl(sc);
248
249#if AMR_ENABLE_CAM != 0
250    /*
251     * Attach our 'real' SCSI channels to CAM.
252     */
253    if (amr_cam_attach(sc))
254	return(ENXIO);
255    debug(2, "CAM attach done");
256#endif
257
258    /*
259     * Create the control device.
260     */
261    sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
262			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
263    sc->amr_dev_t->si_drv1 = sc;
264    linux_no_adapter++;
265    if (device_get_unit(sc->amr_dev) == 0)
266	make_dev_alias(sc->amr_dev_t, "megadev0");
267
268    /*
269     * Schedule ourselves to bring the controller up once interrupts are
270     * available.
271     */
272    bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
273    sc->amr_ich.ich_func = amr_startup;
274    sc->amr_ich.ich_arg = sc;
275    if (config_intrhook_establish(&sc->amr_ich) != 0) {
276	device_printf(sc->amr_dev, "can't establish configuration hook\n");
277	return(ENOMEM);
278    }
279
280    /*
281     * Print a little information about the controller.
282     */
283    amr_describe_controller(sc);
284
285    debug(2, "attach complete");
286    return(0);
287}
288
289/********************************************************************************
290 * Locate disk resources and attach children to them.
291 */
292static void
293amr_startup(void *arg)
294{
295    struct amr_softc	*sc = (struct amr_softc *)arg;
296    struct amr_logdrive	*dr;
297    int			i, error;
298
299    debug_called(1);
300
301    /* pull ourselves off the intrhook chain */
302    if (sc->amr_ich.ich_func)
303	config_intrhook_disestablish(&sc->amr_ich);
304    sc->amr_ich.ich_func = NULL;
305
306    /* get up-to-date drive information */
307    if (amr_query_controller(sc)) {
308	device_printf(sc->amr_dev, "can't scan controller for drives\n");
309	return;
310    }
311
312    /* iterate over available drives */
313    for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
314	/* are we already attached to this drive? */
315	if (dr->al_disk == 0) {
316	    /* generate geometry information */
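	    /*
	     * 0x200000 blocks of 512 bytes is 1GB; drives larger than that
	     * get the extended 255-head/63-sector translation, smaller ones
	     * keep the classic 64-head/32-sector layout.
	     */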
317	    if (dr->al_size > 0x200000) {	/* extended translation? */
318		dr->al_heads = 255;
319		dr->al_sectors = 63;
320	    } else {
321		dr->al_heads = 64;
322		dr->al_sectors = 32;
323	    }
324	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
325
326	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
327	    if (dr->al_disk == 0)
328		device_printf(sc->amr_dev, "device_add_child failed\n");
329	    device_set_ivars(dr->al_disk, dr);
330	}
331    }
332
333    if ((error = bus_generic_attach(sc->amr_dev)) != 0)
334	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
335
336    /* mark controller back up */
337    sc->amr_state &= ~AMR_STATE_SHUTDOWN;
338
339    /* interrupts will be enabled before we do anything more */
340    sc->amr_state |= AMR_STATE_INTEN;
341
342    /*
343     * Start the timeout routine.
344     */
345/*    sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
346
347    return;
348}
349
350static void
351amr_init_sysctl(struct amr_softc *sc)
352{
353
354    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
355	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
356	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
	"Allow logical drive create/delete ioctls");
358}
359
360
361/*******************************************************************************
362 * Free resources associated with a controller instance
363 */
364void
365amr_free(struct amr_softc *sc)
366{
367    struct amr_command_cluster	*acc;
368
369#if AMR_ENABLE_CAM != 0
370    /* detach from CAM */
371    amr_cam_detach(sc);
372#endif
373
374    /* cancel status timeout */
375    untimeout(amr_periodic, sc, sc->amr_timeout);
376
377    /* throw away any command buffers */
378    while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
379	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
380	amr_freecmd_cluster(acc);
381    }
382
383    /* destroy control device */
384    if( sc->amr_dev_t != (struct cdev *)NULL)
385	    destroy_dev(sc->amr_dev_t);
386
387    if (mtx_initialized(&sc->amr_hw_lock))
388	mtx_destroy(&sc->amr_hw_lock);
389
390    if (mtx_initialized(&sc->amr_list_lock))
391	mtx_destroy(&sc->amr_list_lock);
392}
393
394/*******************************************************************************
395 * Receive a bio structure from a child device and queue it on a particular
396 * disk resource, then poke the disk resource to start as much work as it can.
397 */
398int
399amr_submit_bio(struct amr_softc *sc, struct bio *bio)
400{
401    debug_called(2);
402
403    mtx_lock(&sc->amr_list_lock);
404    amr_enqueue_bio(sc, bio);
405    amr_startio(sc);
406    mtx_unlock(&sc->amr_list_lock);
407    return(0);
408}
409
410/********************************************************************************
411 * Accept an open operation on the control device.
412 */
413static int
414amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
415{
416    int			unit = minor(dev);
417    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
418
419    debug_called(1);
420
421    sc->amr_state |= AMR_STATE_OPEN;
422    return(0);
423}
424
425#ifdef LSI
426static int
427amr_del_ld(struct amr_softc *sc, int drv_no, int status)
428{
429
430    debug_called(1);
431
432    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
433    sc->amr_state &= ~AMR_STATE_LD_DELETE;
434    sc->amr_state |= AMR_STATE_REMAP_LD;
435    debug(1, "State Set");
436
437    if (!status) {
	debug(1, "destroying disk %d", drv_no);
	if (--amr_disks_registered == 0)
	    cdevsw_remove(&amrddisk_cdevsw);
	debug(1, "disk destroyed successfully");
442    }
443    return 0;
444}
445
446static int
447amr_prepare_ld_delete(struct amr_softc *sc)
448{
449
450    debug_called(1);
451    if (sc->ld_del_supported == 0)
452	return(ENOIOCTL);
453
454    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
455    sc->amr_state |= AMR_STATE_LD_DELETE;
456
    /* Wait up to a minute for outstanding commands to be flushed. */
    tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO, "delete_logical_drv", hz * 60 * 1);
459    if ( sc->amr_busyslots )
460	return(ENOIOCTL);
461
462    return 0;
463}
464#endif
465
466/********************************************************************************
467 * Accept the last close on the control device.
468 */
469static int
470amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
471{
472    int			unit = minor(dev);
473    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
474
475    debug_called(1);
476
477    sc->amr_state &= ~AMR_STATE_OPEN;
478    return (0);
479}
480
481/********************************************************************************
482 * Handle controller-specific control operations.
483 */
484static void
485amr_rescan_drives(struct cdev *dev)
486{
487    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
488    int			i, error = 0;
489
490    sc->amr_state |= AMR_STATE_REMAP_LD;
491    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "waiting for controller to become idle\n");
493	amr_done(sc);
494    }
495
496    /* mark ourselves as in-shutdown */
497    sc->amr_state |= AMR_STATE_SHUTDOWN;
498
499    /* flush controller */
500    device_printf(sc->amr_dev, "flushing cache...");
501    printf("%s\n", amr_flush(sc) ? "failed" : "done");
502
503    /* delete all our child devices */
504    for(i = 0 ; i < AMR_MAXLD; i++) {
505	if(sc->amr_drive[i].al_disk != 0) {
506	    if((error = device_delete_child(sc->amr_dev,
507		sc->amr_drive[i].al_disk)) != 0)
508		goto shutdown_out;
509
510	     sc->amr_drive[i].al_disk = 0;
511	}
512    }
513
514shutdown_out:
515    amr_startup(sc);
516}
517
518int
519amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
520    d_thread_t *td)
521{
522    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
523    struct amr_command		*ac;
524    struct amr_mailbox		*mb;
525    struct amr_linux_ioctl	ali;
526    void			*dp, *temp;
527    int				error;
528    int				adapter, len, ac_flags = 0;
529    int				logical_drives_changed = 0;
530    u_int32_t			linux_version = 0x02100000;
531    u_int8_t			status;
532    struct amr_passthrough	*ap;	/* 60 bytes */
533
534    error = 0;
535    dp = NULL;
536    ac = NULL;
537    ap = NULL;
538
539    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
540	return (error);
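    /*
     * Dispatch on the Linux megaraid opcode: 0x82 appears to carry driver
     * queries ('e' returns a version number, 'm' the adapter count), while
     * 0x80/0x81 carry a mailbox command with an optional data buffer.
     */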
541    switch (ali.ui.fcs.opcode) {
542    case 0x82:
543	switch(ali.ui.fcs.subopcode) {
544	case 'e':
545	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
546		sizeof(linux_version));
547	    error = 0;
548	    break;
549
550	case 'm':
551	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
552		sizeof(linux_no_adapter));
553	    td->td_retval[0] = linux_no_adapter;
554	    error = 0;
555	    break;
556
557	default:
558	    printf("Unknown subopcode\n");
559	    error = ENOIOCTL;
560	    break;
561	}
562    break;
563
564    case 0x80:
565    case 0x81:
566	if (ali.ui.fcs.opcode == 0x80)
567	    len = max(ali.outlen, ali.inlen);
568	else
569	    len = ali.ui.fcs.length;
570
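	/*
	 * Recover the adapter index from the Linux megaraid encoding, which
	 * appears to pack it as ('m' << 8) | adapno; since '<<' binds more
	 * tightly than '^', the XOR below strips the 'm' tag again.
	 */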
571	adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
572
573	ap = malloc(sizeof(struct amr_passthrough),
574	    M_DEVBUF, M_WAITOK | M_ZERO);
575
576	mb = (void *)&ali.mbox[0];
577
578	if ((ali.mbox[0] == FC_DEL_LOGDRV  && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
579	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
580	    if (sc->amr_allow_vol_config == 0) {
581		error = EPERM;
582		break;
583	    }
584	    logical_drives_changed = 1;
585	}
586
587	if (ali.mbox[0] == AMR_CMD_PASS) {
588	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
589		sizeof(struct amr_passthrough));
590	    if (error)
591		break;
592
593	    if (ap->ap_data_transfer_length)
594		dp = malloc(ap->ap_data_transfer_length, M_DEVBUF,
595		    M_WAITOK | M_ZERO);
596
597	    if (ali.inlen) {
598		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
599		    dp, ap->ap_data_transfer_length);
600		if (error)
601		    break;
602	    }
603
604	    mtx_lock(&sc->amr_list_lock);
605	    while ((ac = amr_alloccmd(sc)) == NULL)
606		msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
607
608	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
609	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
610	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
611	    ac->ac_flags = ac_flags;
612
613	    ac->ac_data = ap;
614	    ac->ac_length = sizeof(struct amr_passthrough);
615	    ac->ac_ccb_data = dp;
616	    ac->ac_ccb_length = ap->ap_data_transfer_length;
617	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
618
619	    error = amr_wait_command(ac);
620	    mtx_unlock(&sc->amr_list_lock);
621	    if (error)
622		break;
623
624	    status = ac->ac_status;
625	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
626	    if (error)
627		break;
628
629	    if (ali.outlen) {
630		error = copyout(dp, temp, ap->ap_data_transfer_length);
631	        if (error)
632		    break;
633	    }
634	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
635	    if (error)
636		break;
637
638	    error = 0;
639	    break;
640	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
641	    printf("No AMR_CMD_PASS_64\n");
642	    error = ENOIOCTL;
643	    break;
644	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
645	    printf("No AMR_CMD_EXTPASS\n");
646	    error = ENOIOCTL;
647	    break;
648	} else {
649	    if (len)
650		dp = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
651
652	    if (ali.inlen) {
653		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
654		if (error)
655		    break;
656	    }
657
658	    mtx_lock(&sc->amr_list_lock);
659	    while ((ac = amr_alloccmd(sc)) == NULL)
660		msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
661
662	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
663	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
664	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
665
666	    ac->ac_length = len;
667	    ac->ac_data = dp;
668	    ac->ac_flags = ac_flags;
669
670	    error = amr_wait_command(ac);
671	    mtx_unlock(&sc->amr_list_lock);
672	    if (error)
673		break;
674
675	    status = ac->ac_status;
676	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
677	    if (ali.outlen) {
678		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
679		if (error)
680		    break;
681	    }
682
683	    error = 0;
684	    if (logical_drives_changed)
685		amr_rescan_drives(dev);
686	    break;
687	}
688	break;
689
690    default:
691	debug(1, "unknown linux ioctl 0x%lx", cmd);
692	printf("unknown linux ioctl 0x%lx\n", cmd);
693	error = ENOIOCTL;
694	break;
695    }
696
    /*
     * Clean up: take the list lock so any command we allocated can be
     * released, then free the data buffers if they were allocated.
     */
701    mtx_lock(&sc->amr_list_lock);
702    if (ac != NULL)
703	amr_releasecmd(ac);
704    mtx_unlock(&sc->amr_list_lock);
705    if (dp != NULL)
706	free(dp, M_DEVBUF);
707    if (ap != NULL)
708	free(ap, M_DEVBUF);
709    return(error);
710}
711
712static int
713amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
714{
715    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
716    union {
717	void			*_p;
718	struct amr_user_ioctl	*au;
719#ifdef AMR_IO_COMMAND32
720	struct amr_user_ioctl32	*au32;
721#endif
722	int			*result;
723    } arg;
724    struct amr_command		*ac;
725    struct amr_mailbox_ioctl	*mbi;
726    void			*dp, *au_buffer;
727    unsigned long		au_length;
728    unsigned char		*au_cmd;
729    int				*au_statusp, au_direction;
730    int				error, ac_flags = 0;
731    struct amr_passthrough	*ap;	/* 60 bytes */
732    int				logical_drives_changed = 0;
733
734    debug_called(1);
735
736    arg._p = (void *)addr;
737
738    error = 0;
739    dp = NULL;
740    ac = NULL;
741    ap = NULL;
742
743    switch(cmd) {
744
745    case AMR_IO_VERSION:
746	debug(1, "AMR_IO_VERSION");
747	*arg.result = AMR_IO_VERSION_NUMBER;
748	return(0);
749
750#ifdef AMR_IO_COMMAND32
751    /*
     * Accept ioctls from 32-bit binaries on 64-bit platforms such as
     * amd64.  LSI's MEGAMGR utility is the only example known today... -mi
755     */
756    case AMR_IO_COMMAND32:
757	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
758	au_cmd = arg.au32->au_cmd;
759	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
760	au_length = arg.au32->au_length;
761	au_direction = arg.au32->au_direction;
762	au_statusp = &arg.au32->au_status;
763	break;
764#endif
765
766    case AMR_IO_COMMAND:
767	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
768	au_cmd = arg.au->au_cmd;
769	au_buffer = (void *)arg.au->au_buffer;
770	au_length = arg.au->au_length;
771	au_direction = arg.au->au_direction;
772	au_statusp = &arg.au->au_status;
773	break;
774
775    case 0xc0046d00:
776    case 0xc06e6d00:	/* Linux emulation */
777	{
778	    devclass_t			devclass;
779	    struct amr_linux_ioctl	ali;
780	    int				adapter, error;
781
782	    devclass = devclass_find("amr");
783	    if (devclass == NULL)
784		return (ENOENT);
785
786	    error = copyin(addr, &ali, sizeof(ali));
787	    if (error)
788		return (error);
789	    if (ali.ui.fcs.opcode == 0x82)
790		adapter = 0;
791	    else
792		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
793
794	    sc = devclass_get_softc(devclass, adapter);
795	    if (sc == NULL)
796		return (ENOENT);
797
798	return (amr_linux_ioctl_int(sc->amr_dev_t, cmd,
799	    addr, 0, td));
800	}
801    default:
802	debug(1, "unknown ioctl 0x%lx", cmd);
803	return(ENOIOCTL);
804    }
805
806    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
807	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
808	if (sc->amr_allow_vol_config == 0) {
809	    error = EPERM;
810	    goto out;
811	}
812	logical_drives_changed = 1;
813#ifdef LSI
814	if ((error = amr_prepare_ld_delete(sc)) != 0)
815	    return (error);
816#endif
817    }
818
819    /* handle inbound data buffer */
820    if (au_length != 0 && au_cmd[0] != 0x06) {
821	if ((dp = malloc(au_length, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
822	    error = ENOMEM;
823	    goto out;
824	}
825	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
826	    free(dp, M_DEVBUF);
827	    return (error);
828	}
829	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
830    }
831
832    /* Allocate this now before the mutex gets held */
833    if (au_cmd[0] == AMR_CMD_PASS)
834	ap = malloc(sizeof(struct amr_passthrough), M_DEVBUF, M_WAITOK|M_ZERO);
835
836    mtx_lock(&sc->amr_list_lock);
837    while ((ac = amr_alloccmd(sc)) == NULL)
838	msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
839
840    /* handle SCSI passthrough command */
841    if (au_cmd[0] == AMR_CMD_PASS) {
842        int len;
843
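	/*
	 * The user command block for AMR_CMD_PASS is consumed as: byte 2
	 * gives the CDB length, bytes 3.. hold the CDB, and the bytes that
	 * follow it carry a control byte (timeout/ARS/logical flags), the
	 * logical drive number, the channel and the target ID.
	 */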
844	/* copy cdb */
845        len = au_cmd[2];
846	ap->ap_cdb_length = len;
847	bcopy(au_cmd + 3, ap->ap_cdb, len);
848
849	/* build passthrough */
850	ap->ap_timeout		= au_cmd[len + 3] & 0x07;
851	ap->ap_ars		= (au_cmd[len + 3] & 0x08) ? 1 : 0;
852	ap->ap_islogical		= (au_cmd[len + 3] & 0x80) ? 1 : 0;
853	ap->ap_logical_drive_no	= au_cmd[len + 4];
854	ap->ap_channel		= au_cmd[len + 5];
855	ap->ap_scsi_id 		= au_cmd[len + 6];
856	ap->ap_request_sense_length	= 14;
857	ap->ap_data_transfer_length	= au_length;
858	/* XXX what about the request-sense area? does the caller want it? */
859
860	/* build command */
861	ac->ac_data = ap;
862	ac->ac_length = sizeof(struct amr_passthrough);
863	ac->ac_ccb_data = dp;
864	ac->ac_ccb_length = au_length;
865
866	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
867	ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
868
869    } else {
870	/* direct command to controller */
871	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
872
873	/* copy pertinent mailbox items */
874	mbi->mb_command = au_cmd[0];
875	mbi->mb_channel = au_cmd[1];
876	mbi->mb_param = au_cmd[2];
877	mbi->mb_pad[0] = au_cmd[3];
878	mbi->mb_drive = au_cmd[4];
879
880	/* build the command */
881	ac->ac_data = dp;
882	ac->ac_length = au_length;
883	ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
884    }
885
886    ac->ac_flags = ac_flags;
887
888    /* run the command */
889    error = amr_wait_command(ac);
890    mtx_unlock(&sc->amr_list_lock);
891    if (error)
892	goto out;
893
894    /* copy out data and set status */
895    if (au_length != 0) {
896	error = copyout(dp, au_buffer, au_length);
897    }
898    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
899    if (dp != NULL)
900	debug(2, "%16d", (int)dp);
901    *au_statusp = ac->ac_status;
902
903out:
    /*
     * Clean up: take the list lock so any command we allocated can be
     * released, then free the data buffers if they were allocated.
     */
908    mtx_lock(&sc->amr_list_lock);
909    if (ac != NULL)
910	amr_releasecmd(ac);
911    mtx_unlock(&sc->amr_list_lock);
912    if (dp != NULL)
913	free(dp, M_DEVBUF);
914    if (ap != NULL)
915	free(ap, M_DEVBUF);
916
917#ifndef LSI
918    if (logical_drives_changed)
919	amr_rescan_drives(dev);
920#endif
921
922    return(error);
923}
924
925/********************************************************************************
926 ********************************************************************************
927                                                                Status Monitoring
928 ********************************************************************************
929 ********************************************************************************/
930
931/********************************************************************************
932 * Perform a periodic check of the controller status
933 */
934static void
935amr_periodic(void *data)
936{
937    struct amr_softc	*sc = (struct amr_softc *)data;
938
939    debug_called(2);
940
941    /* XXX perform periodic status checks here */
942
943    /* compensate for missed interrupts */
944    amr_done(sc);
945
946    /* reschedule */
947    sc->amr_timeout = timeout(amr_periodic, sc, hz);
948}
949
950/********************************************************************************
951 ********************************************************************************
952                                                                 Command Wrappers
953 ********************************************************************************
954 ********************************************************************************/
955
956/********************************************************************************
957 * Interrogate the controller for the operational parameters we require.
958 */
959static int
960amr_query_controller(struct amr_softc *sc)
961{
962    struct amr_enquiry3	*aex;
963    struct amr_prodinfo	*ap;
964    struct amr_enquiry	*ae;
965    int			ldrv;
966    int			status;
967
968    /*
969     * If we haven't found the real limit yet, let us have a couple of commands in
970     * order to be able to probe.
971     */
972    if (sc->amr_maxio == 0)
973	sc->amr_maxio = 2;
974
975    /*
976     * Greater than 10 byte cdb support
977     */
978    sc->support_ext_cdb = amr_support_ext_cdb(sc);
979
980    if(sc->support_ext_cdb) {
981	debug(2,"supports extended CDBs.");
982    }
983
984    /*
985     * Try to issue an ENQUIRY3 command
986     */
987    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
988			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
989
990	/*
991	 * Fetch current state of logical drives.
992	 */
993	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
994	    sc->amr_drive[ldrv].al_size       = aex->ae_drivesize[ldrv];
995	    sc->amr_drive[ldrv].al_state      = aex->ae_drivestate[ldrv];
996	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
997	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
998		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
999	}
1000	free(aex, M_DEVBUF);
1001
1002	/*
1003	 * Get product info for channel count.
1004	 */
1005	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1006	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
1007	    return(1);
1008	}
1009	sc->amr_maxdrives = 40;
1010	sc->amr_maxchan = ap->ap_nschan;
1011	sc->amr_maxio = ap->ap_maxio;
1012	sc->amr_type |= AMR_TYPE_40LD;
1013	free(ap, M_DEVBUF);
1014
1015	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1016	if (ap != NULL)
1017	    free(ap, M_DEVBUF);
1018	if (!status) {
1019	    sc->amr_ld_del_supported = 1;
1020	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1021	}
1022    } else {
1023
1024	/* failed, try the 8LD ENQUIRY commands */
1025	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1026	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1027		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1028		return(1);
1029	    }
1030	    ae->ae_signature = 0;
1031	}
1032
1033	/*
1034	 * Fetch current state of logical drives.
1035	 */
1036	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1037	    sc->amr_drive[ldrv].al_size       = ae->ae_ldrv.al_size[ldrv];
1038	    sc->amr_drive[ldrv].al_state      = ae->ae_ldrv.al_state[ldrv];
1039	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1040	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1041		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1042	}
1043
1044	sc->amr_maxdrives = 8;
1045	sc->amr_maxchan = ae->ae_adapter.aa_channels;
1046	sc->amr_maxio = ae->ae_adapter.aa_maxio;
1047	free(ae, M_DEVBUF);
1048    }
1049
1050    /*
1051     * Mark remaining drives as unused.
1052     */
1053    for (; ldrv < AMR_MAXLD; ldrv++)
1054	sc->amr_drive[ldrv].al_size = 0xffffffff;
1055
1056    /*
1057     * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
1058     * the controller's reported value, and lockups have been seen when we do.
1059     */
1060    sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1061
1062    return(0);
1063}
1064
1065/********************************************************************************
1066 * Run a generic enquiry-style command.
1067 */
1068static void *
1069amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1070{
1071    struct amr_command	*ac;
1072    void		*result;
1073    u_int8_t		*mbox;
1074    int			error;
1075
1076    debug_called(1);
1077
1078    error = 1;
1079    result = NULL;
1080
1081    /* get ourselves a command buffer */
1082    mtx_lock(&sc->amr_list_lock);
1083    ac = amr_alloccmd(sc);
1084    mtx_unlock(&sc->amr_list_lock);
1085    if (ac == NULL)
1086	goto out;
1087    /* allocate the response structure */
1088    if ((result = malloc(bufsize, M_DEVBUF, M_ZERO|M_NOWAIT)) == NULL)
1089	goto out;
1090    /* set command flags */
1091
1092    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1093
1094    /* point the command at our data */
1095    ac->ac_data = result;
1096    ac->ac_length = bufsize;
1097
1098    /* build the command proper */
1099    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1100    mbox[0] = cmd;
1101    mbox[2] = cmdsub;
1102    mbox[3] = cmdqual;
1103    *status = 0;
1104
1105    /* can't assume that interrupts are going to work here, so play it safe */
1106    if (sc->amr_poll_command(ac))
1107	goto out;
1108    error = ac->ac_status;
1109    *status = ac->ac_status;
1110
1111 out:
1112    mtx_lock(&sc->amr_list_lock);
1113    if (ac != NULL)
1114	amr_releasecmd(ac);
1115    mtx_unlock(&sc->amr_list_lock);
1116    if ((error != 0) && (result != NULL)) {
1117	free(result, M_DEVBUF);
1118	result = NULL;
1119    }
1120    return(result);
1121}
1122
1123/********************************************************************************
1124 * Flush the controller's internal cache, return status.
1125 */
1126int
1127amr_flush(struct amr_softc *sc)
1128{
1129    struct amr_command	*ac;
1130    int			error;
1131
1132    /* get ourselves a command buffer */
1133    error = 1;
1134    mtx_lock(&sc->amr_list_lock);
1135    ac = amr_alloccmd(sc);
1136    mtx_unlock(&sc->amr_list_lock);
1137    if (ac == NULL)
1138	goto out;
1139    /* set command flags */
1140    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1141
1142    /* build the command proper */
1143    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1144
1145    /* we have to poll, as the system may be going down or otherwise damaged */
1146    if (sc->amr_poll_command(ac))
1147	goto out;
1148    error = ac->ac_status;
1149
1150 out:
1151    mtx_lock(&sc->amr_list_lock);
1152    if (ac != NULL)
1153	amr_releasecmd(ac);
1154    mtx_unlock(&sc->amr_list_lock);
1155    return(error);
1156}
1157
1158/********************************************************************************
 * Detect extended CDB (greater than 10-byte CDB) support.
 * Returns 1 if the controller supports extended CDBs, 0 if it does not.
1162 */
1163static int
1164amr_support_ext_cdb(struct amr_softc *sc)
1165{
1166    struct amr_command	*ac;
1167    u_int8_t		*mbox;
1168    int			error;
1169
1170    /* get ourselves a command buffer */
1171    error = 0;
1172    mtx_lock(&sc->amr_list_lock);
1173    ac = amr_alloccmd(sc);
1174    mtx_unlock(&sc->amr_list_lock);
1175    if (ac == NULL)
1176	goto out;
1177    /* set command flags */
1178    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1179
1180    /* build the command proper */
1181    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1182    mbox[0] = 0xA4;
1183    mbox[2] = 0x16;
1184
1185
1186    /* we have to poll, as the system may be going down or otherwise damaged */
1187    if (sc->amr_poll_command(ac))
1188	goto out;
1189    if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1190	    error = 1;
1191    }
1192
1193out:
1194    mtx_lock(&sc->amr_list_lock);
1195    if (ac != NULL)
1196	amr_releasecmd(ac);
1197    mtx_unlock(&sc->amr_list_lock);
1198    return(error);
1199}
1200
1201/********************************************************************************
1202 * Try to find I/O work for the controller from one or more of the work queues.
1203 *
1204 * We make the assumption that if the controller is not ready to take a command
1205 * at some given time, it will generate an interrupt at some later time when
1206 * it is.
1207 */
1208void
1209amr_startio(struct amr_softc *sc)
1210{
1211    struct amr_command	*ac;
1212
1213    /* spin until something prevents us from doing any work */
1214    for (;;) {
1215
	/* Don't bother to queue commands if no bounce buffers are available. */
1217	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1218	    break;
1219
1220	/* try to get a ready command */
1221	ac = amr_dequeue_ready(sc);
1222
1223	/* if that failed, build a command from a bio */
1224	if (ac == NULL)
1225	    (void)amr_bio_command(sc, &ac);
1226
1227#if AMR_ENABLE_CAM != 0
1228	/* if that failed, build a command from a ccb */
1229	if (ac == NULL)
1230	    (void)amr_cam_command(sc, &ac);
1231#endif
1232
1233	/* if we don't have anything to do, give up */
1234	if (ac == NULL)
1235	    break;
1236
1237	/* try to give the command to the controller; if this fails save it for later and give up */
1238	if (amr_start(ac)) {
1239	    debug(2, "controller busy, command deferred");
1240	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
1241	    break;
1242	}
1243    }
1244}
1245
1246/********************************************************************************
1247 * Handle completion of an I/O command.
1248 */
1249static void
1250amr_completeio(struct amr_command *ac)
1251{
1252    struct amrd_softc		*sc = ac->ac_bio->bio_disk->d_drv1;
1253    static struct timeval	lastfail;
1254    static int			curfail;
1255
1256    if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
1257	ac->ac_bio->bio_error = EIO;
1258	ac->ac_bio->bio_flags |= BIO_ERROR;
1259
1260	if (ppsratecheck(&lastfail, &curfail, 1))
1261	    device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1262/*	amr_printcommand(ac);*/
1263    }
1264    amrd_intr(ac->ac_bio);
1265    mtx_lock(&ac->ac_sc->amr_list_lock);
1266    amr_releasecmd(ac);
1267    mtx_unlock(&ac->ac_sc->amr_list_lock);
1268}
1269
1270/********************************************************************************
1271 ********************************************************************************
1272                                                               Command Processing
1273 ********************************************************************************
1274 ********************************************************************************/
1275
1276/********************************************************************************
1277 * Convert a bio off the top of the bio queue into a command.
1278 */
1279static int
1280amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1281{
1282    struct amr_command	*ac;
1283    struct amrd_softc	*amrd;
1284    struct bio		*bio;
1285    int			error;
1286    int			blkcount;
1287    int			driveno;
1288    int			cmd;
1289
1290    ac = NULL;
1291    error = 0;
1292
1293    /* get a command */
1294    if ((ac = amr_alloccmd(sc)) == NULL)
1295	return (ENOMEM);
1296
1297    /* get a bio to work on */
1298    if ((bio = amr_dequeue_bio(sc)) == NULL) {
1299	amr_releasecmd(ac);
1300	return (0);
1301    }
1302
1303    /* connect the bio to the command */
1304    ac->ac_complete = amr_completeio;
1305    ac->ac_bio = bio;
1306    ac->ac_data = bio->bio_data;
1307    ac->ac_length = bio->bio_bcount;
1308    if (bio->bio_cmd == BIO_READ) {
1309	ac->ac_flags |= AMR_CMD_DATAIN;
1310	if (AMR_IS_SG64(sc)) {
1311	    cmd = AMR_CMD_LREAD64;
1312	    ac->ac_flags |= AMR_CMD_SG64;
1313	} else
1314	    cmd = AMR_CMD_LREAD;
1315    } else {
1316	ac->ac_flags |= AMR_CMD_DATAOUT;
1317	if (AMR_IS_SG64(sc)) {
1318	    cmd = AMR_CMD_LWRITE64;
1319	    ac->ac_flags |= AMR_CMD_SG64;
1320	} else
1321	    cmd = AMR_CMD_LWRITE;
1322    }
1323    amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1324    driveno = amrd->amrd_drive - sc->amr_drive;
1325    blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1326
1327    ac->ac_mailbox.mb_command = cmd;
1328    ac->ac_mailbox.mb_blkcount = blkcount;
1329    ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1330    ac->ac_mailbox.mb_drive = driveno;
1331    if (sc->amr_state & AMR_STATE_REMAP_LD)
1332	ac->ac_mailbox.mb_drive |= 0x80;
1333
1334    /* we fill in the s/g related data when the command is mapped */
1335
1336    if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size)
1337	device_printf(sc->amr_dev, "I/O beyond end of unit (%lld,%d > %lu)\n",
1338		      (long long)bio->bio_pblkno, blkcount,
1339		      (u_long)sc->amr_drive[driveno].al_size);
1340
1341    *acp = ac;
1342    return(error);
1343}
1344
1345/********************************************************************************
1346 * Take a command, submit it to the controller and sleep until it completes
1347 * or fails.  Interrupts must be enabled, returns nonzero on error.
1348 */
1349static int
1350amr_wait_command(struct amr_command *ac)
1351{
1352    int			error = 0;
1353    struct amr_softc	*sc = ac->ac_sc;
1354
1355    debug_called(1);
1356
1357    ac->ac_complete = NULL;
1358    ac->ac_flags |= AMR_CMD_SLEEP;
1359    if ((error = amr_start(ac)) != 0) {
1360	return(error);
1361    }
1362
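    /*
     * msleep() drops amr_list_lock while we sleep and retakes it before
     * returning; the completion path is expected to clear AMR_CMD_BUSY and
     * wake us on the command pointer.
     */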
1363    while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1364	error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1365    }
1366
1367    return(error);
1368}
1369
1370/********************************************************************************
1371 * Take a command, submit it to the controller and busy-wait for it to return.
1372 * Returns nonzero on error.  Can be safely called with interrupts enabled.
1373 */
1374static int
1375amr_std_poll_command(struct amr_command *ac)
1376{
1377    struct amr_softc	*sc = ac->ac_sc;
1378    int			error, count;
1379
1380    debug_called(2);
1381
1382    ac->ac_complete = NULL;
1383    if ((error = amr_start(ac)) != 0)
1384	return(error);
1385
1386    count = 0;
1387    do {
1388	/*
1389	 * Poll for completion, although the interrupt handler may beat us to it.
1390	 * Note that the timeout here is somewhat arbitrary.
1391	 */
1392	amr_done(sc);
1393	DELAY(1000);
1394    } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1395    if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1396	error = 0;
1397    } else {
1398	/* XXX the slot is now marked permanently busy */
1399	error = EIO;
1400	device_printf(sc->amr_dev, "polled command timeout\n");
1401    }
1402    return(error);
1403}
1404
1405static void
1406amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1407{
1408    struct amr_command *ac = arg;
1409    struct amr_softc *sc = ac->ac_sc;
1410    int flags;
1411
1412    flags = 0;
1413    if (ac->ac_flags & AMR_CMD_DATAIN)
1414	flags |= BUS_DMASYNC_PREREAD;
1415    if (ac->ac_flags & AMR_CMD_DATAOUT)
1416	flags |= BUS_DMASYNC_PREWRITE;
1417
1418    if (AC_IS_SG64(ac)) {
1419	amr_setup_dma64map(arg, segs, nsegs, err);
1420	bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1421    } else {
1422	amr_setup_dmamap(arg, segs, nsegs, err);
1423	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1424    }
1425    sc->amr_poll_command1(sc, ac);
1426}
1427
1428/********************************************************************************
1429 * Take a command, submit it to the controller and busy-wait for it to return.
1430 * Returns nonzero on error.  Can be safely called with interrupts enabled.
1431 */
1432static int
1433amr_quartz_poll_command(struct amr_command *ac)
1434{
1435    bus_dma_tag_t	tag;
1436    bus_dmamap_t	datamap;
1437    struct amr_softc	*sc = ac->ac_sc;
1438    int			error;
1439
1440    debug_called(2);
1441
1442    error = 0;
1443
1444    if (AC_IS_SG64(ac)) {
1445	tag = sc->amr_buffer64_dmat;
1446	datamap = ac->ac_dma64map;
1447    } else {
1448	tag = sc->amr_buffer_dmat;
1449	datamap = ac->ac_dmamap;
1450    }
1451
1452    /* now we have a slot, we can map the command (unmapped in amr_complete) */
1453    if (ac->ac_data != 0) {
1454	if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1455	    amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1456	    error = 1;
1457	}
1458    } else {
1459	error = amr_quartz_poll_command1(sc, ac);
1460    }
1461
1462    return (error);
1463}
1464
1465static int
1466amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1467{
1468    int count, error;
1469
1470    mtx_lock(&sc->amr_hw_lock);
1471    if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
	count = 0;
	while (sc->amr_busyslots) {
	    msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
	    if (count++ > 10) {
		break;
	    }
	}

	if (sc->amr_busyslots) {
1481	    device_printf(sc->amr_dev, "adapter is busy\n");
1482	    mtx_unlock(&sc->amr_hw_lock);
1483	    if (ac->ac_data != NULL) {
1484		if (AC_IS_SG64(ac))
1485		    bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1486		else
1487		    bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1488	    }
1489    	    ac->ac_status=0;
1490	    return(1);
1491	}
1492    }
1493
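    /*
     * Polled mailbox handshake: copy the command into the host mailbox,
     * mark it busy and ring the submit doorbell, spin until the controller
     * replaces the 0xFF status sentinels and the 0x77 poll value, then
     * acknowledge with 0x77 and ring the acknowledge doorbell until the
     * controller clears it.
     */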
1494    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1495
1496    /* clear the poll/ack fields in the mailbox */
1497    sc->amr_mailbox->mb_ident = 0xFE;
1498    sc->amr_mailbox->mb_nstatus = 0xFF;
1499    sc->amr_mailbox->mb_status = 0xFF;
1500    sc->amr_mailbox->mb_poll = 0;
1501    sc->amr_mailbox->mb_ack = 0;
1502    sc->amr_mailbox->mb_busy = 1;
1503
1504    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1505
1506    while(sc->amr_mailbox->mb_nstatus == 0xFF)
1507	DELAY(1);
1508    while(sc->amr_mailbox->mb_status == 0xFF)
1509	DELAY(1);
    ac->ac_status = sc->amr_mailbox->mb_status;
    error = (ac->ac_status != AMR_STATUS_SUCCESS) ? 1 : 0;
1512    while(sc->amr_mailbox->mb_poll != 0x77)
1513	DELAY(1);
1514    sc->amr_mailbox->mb_poll = 0;
1515    sc->amr_mailbox->mb_ack = 0x77;
1516
1517    /* acknowledge that we have the commands */
1518    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1519    while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1520	DELAY(1);
1521    mtx_unlock(&sc->amr_hw_lock);
1522
1523    /* unmap the command's data buffer */
1524    if (ac->ac_flags & AMR_CMD_DATAIN) {
1525	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1526	    BUS_DMASYNC_POSTREAD);
1527    }
1528    if (ac->ac_flags & AMR_CMD_DATAOUT) {
1529	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1530	    BUS_DMASYNC_POSTWRITE);
1531    }
1532    if (AC_IS_SG64(ac))
1533	bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1534    else
1535	bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1536
1537    return(error);
1538}
1539
1540static __inline int
1541amr_freeslot(struct amr_command *ac)
1542{
1543    struct amr_softc *sc = ac->ac_sc;
1544    int			slot;
1545
1546    debug_called(3);
1547
1548    slot = ac->ac_slot;
1549    if (sc->amr_busycmd[slot] == NULL)
1550	panic("amr: slot %d not busy?\n", slot);
1551
1552    sc->amr_busycmd[slot] = NULL;
1553    atomic_subtract_int(&sc->amr_busyslots, 1);
1554
1555    return (0);
1556}
1557
1558/********************************************************************************
1559 * Map/unmap (ac)'s data in the controller's addressable space as required.
1560 *
1561 * These functions may be safely called multiple times on a given command.
1562 */
1563static void
1564amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1565{
1566    struct amr_command	*ac = (struct amr_command *)arg;
1567    struct amr_sgentry	*sg;
1568    int			i;
1569    u_int8_t		*sgc;
1570
1571    debug_called(3);
1572
1573    /* get base address of s/g table */
1574    sg = ac->ac_sg.sg32;
1575
1576    /* save data physical address */
1577
1578    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1579    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1580	 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1581	 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1582	sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1583    } else {
1584	sgc = &ac->ac_mailbox.mb_nsgelem;
1585    }
1586
1587    /* decide whether we need to populate the s/g table */
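    /*
     * With a single segment the controller is handed the buffer's physical
     * address directly in the mailbox; with two or more segments the
     * mailbox instead points at the scatter/gather table built below.
     */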
1588    if (nsegments < 2) {
1589	*sgc = 0;
1590	ac->ac_mailbox.mb_nsgelem = 0;
1591	ac->ac_mailbox.mb_physaddr = segs[0].ds_addr;
1592    } else {
1593        ac->ac_mailbox.mb_nsgelem = nsegments;
1594	*sgc = nsegments;
1595	/* XXX Setting these to 0 might not be needed. */
1596	ac->ac_sg64_lo = 0;
1597	ac->ac_sg64_hi = 0;
1598	ac->ac_mailbox.mb_physaddr = ac->ac_sgbusaddr;
1599	for (i = 0; i < nsegments; i++, sg++) {
1600	    sg->sg_addr = segs[i].ds_addr;
1601	    sg->sg_count = segs[i].ds_len;
1602	}
1603    }
1604
1605}
1606
1607static void
1608amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1609{
1610    struct amr_command	*ac = (struct amr_command *)arg;
1611    struct amr_sg64entry *sg;
1612    int			i;
1613    u_int8_t		*sgc;
1614
1615    debug_called(3);
1616
1617    /* get base address of s/g table */
1618    sg = ac->ac_sg.sg64;
1619
1620    /* save data physical address */
1621
1622    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1623    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1624	 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1625	 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1626	sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1627    } else {
1628	sgc = &ac->ac_mailbox.mb_nsgelem;
1629    }
1630
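    /*
     * 64-bit s/g commands always use the s/g table: its bus address goes in
     * ac_sg64_lo, while the 32-bit mailbox physaddr is set to 0xffffffff,
     * apparently as a marker value.
     */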
1631    ac->ac_mailbox.mb_nsgelem = nsegments;
1632    *sgc = nsegments;
1633    ac->ac_sg64_hi = 0;
1634    ac->ac_sg64_lo = ac->ac_sgbusaddr;
1635    ac->ac_mailbox.mb_physaddr = 0xffffffff;
1636    for (i = 0; i < nsegments; i++, sg++) {
1637	sg->sg_addr = segs[i].ds_addr;
1638	sg->sg_count = segs[i].ds_len;
1639    }
1640}
1641
1642static void
1643amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1644{
1645    struct amr_command          *ac = (struct amr_command *)arg;
1646    struct amr_softc            *sc = ac->ac_sc;
1647    struct amr_sgentry          *sg;
1648    struct amr_passthrough      *ap = (struct amr_passthrough *)ac->ac_data;
1649    struct amr_ext_passthrough	*aep = (struct amr_ext_passthrough *)ac->ac_data;
1650    int                         i;
1651
1652    /* get base address of s/g table */
1653    sg = ac->ac_sg.sg32;
1654
1655    /* decide whether we need to populate the s/g table */
1656    if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1657	if (nsegments < 2) {
1658	    aep->ap_no_sg_elements = 0;
1659	    aep->ap_data_transfer_address =  segs[0].ds_addr;
1660	} else {
1661	    /* save s/g table information in passthrough */
1662	    aep->ap_no_sg_elements = nsegments;
1663	    aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1664	    /*
1665	     * populate s/g table (overwrites previous call which mapped the
1666	     * passthrough)
1667	     */
1668	    for (i = 0; i < nsegments; i++, sg++) {
1669		sg->sg_addr = segs[i].ds_addr;
1670		sg->sg_count = segs[i].ds_len;
1671		debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1672	    }
1673	}
1674	debug(3, "slot %d  %d segments at 0x%x\n", ac->ac_slot,
1675	    aep->ap_no_sg_elements, aep->ap_data_transfer_address);
1676    } else {
1677	if (nsegments < 2) {
1678	    ap->ap_no_sg_elements = 0;
1679	    ap->ap_data_transfer_address =  segs[0].ds_addr;
1680	} else {
1681	    /* save s/g table information in passthrough */
1682	    ap->ap_no_sg_elements = nsegments;
1683	    ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1684	    /*
1685	     * populate s/g table (overwrites previous call which mapped the
1686	     * passthrough)
1687	     */
1688	    for (i = 0; i < nsegments; i++, sg++) {
1689		sg->sg_addr = segs[i].ds_addr;
1690		sg->sg_count = segs[i].ds_len;
1691		debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1692	    }
1693	}
1694	debug(3, "slot %d  %d segments at 0x%x\n", ac->ac_slot,
1695	    ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1696    }
1697    if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1698	bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1699	    BUS_DMASYNC_PREREAD);
1700    if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1701	bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1702	    BUS_DMASYNC_PREWRITE);
1703    if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1704	panic("no direction for ccb?\n");
1705
1706    if (ac->ac_flags & AMR_CMD_DATAIN)
1707	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1708    if (ac->ac_flags & AMR_CMD_DATAOUT)
1709	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1710
1711    ac->ac_flags |= AMR_CMD_MAPPED;
1712
1713    if (sc->amr_submit_command(ac) == EBUSY) {
1714	amr_freeslot(ac);
1715	amr_requeue_ready(ac);
1716    }
1717}
1718
1719static void
1720amr_setup_ccb64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1721{
1722    struct amr_command          *ac = (struct amr_command *)arg;
1723    struct amr_softc            *sc = ac->ac_sc;
1724    struct amr_sg64entry        *sg;
1725    struct amr_passthrough      *ap = (struct amr_passthrough *)ac->ac_data;
1726    struct amr_ext_passthrough	*aep = (struct amr_ext_passthrough *)ac->ac_data;
1727    int                         i;
1728
1729    /* get base address of s/g table */
1730    sg = ac->ac_sg.sg64;
1731
1732    /* decide whether we need to populate the s/g table */
1733    if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1734	/* save s/g table information in passthrough */
1735	aep->ap_no_sg_elements = nsegments;
1736	aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1737	/*
1738	 * populate s/g table (overwrites previous call which mapped the
1739	 * passthrough)
1740	 */
1741	for (i = 0; i < nsegments; i++, sg++) {
1742	    sg->sg_addr = segs[i].ds_addr;
1743	    sg->sg_count = segs[i].ds_len;
1744	    debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1745	}
1746	debug(3, "slot %d  %d segments at 0x%x\n", ac->ac_slot,
1747	    aep->ap_no_sg_elements, aep->ap_data_transfer_address);
1748    } else {
1749	/* save s/g table information in passthrough */
1750	ap->ap_no_sg_elements = nsegments;
1751	ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1752	/*
1753	 * populate s/g table (overwrites previous call which mapped the
1754	 * passthrough)
1755	 */
1756	for (i = 0; i < nsegments; i++, sg++) {
1757	    sg->sg_addr = segs[i].ds_addr;
1758	    sg->sg_count = segs[i].ds_len;
1759	    debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1760	}
1761	debug(3, "slot %d  %d segments at 0x%x\n", ac->ac_slot,
1762	    ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1763    }
1764    if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1765	bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1766	    BUS_DMASYNC_PREREAD);
1767    if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1768	bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1769	    BUS_DMASYNC_PREWRITE);
1770    if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1771	panic("no direction for ccb?\n");
1772
1773    if (ac->ac_flags & AMR_CMD_DATAIN)
1774	bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1775	    BUS_DMASYNC_PREREAD);
1776    if (ac->ac_flags & AMR_CMD_DATAOUT)
1777	bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1778	    BUS_DMASYNC_PREWRITE);
1779
1780    ac->ac_flags |= AMR_CMD_MAPPED;
1781
1782    if (sc->amr_submit_command(ac) == EBUSY) {
1783	amr_freeslot(ac);
1784	amr_requeue_ready(ac);
1785    }
1786}
1787
1788static int
1789amr_mapcmd(struct amr_command *ac)
1790{
1791    bus_dma_tag_t	tag;
1792    bus_dmamap_t	datamap, ccbmap;
1793    bus_dmamap_callback_t *cb;
1794    bus_dmamap_callback_t *ccb_cb;
1795    struct amr_softc	*sc = ac->ac_sc;
1796
1797    debug_called(3);
1798
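    /*
     * Select the DMA tag, maps and load callbacks that match the command's
     * addressing mode (64-bit s/g entries vs. 32-bit).
     */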
1799    if (AC_IS_SG64(ac)) {
1800	tag = sc->amr_buffer64_dmat;
1801	datamap = ac->ac_dma64map;
1802	ccbmap = ac->ac_ccb_dma64map;
1803	cb = amr_setup_dma64map;
1804	ccb_cb = amr_setup_ccb64map;
1805    } else {
1806	tag = sc->amr_buffer_dmat;
1807	datamap = ac->ac_dmamap;
1808	ccbmap = ac->ac_ccb_dmamap;
1809	cb = amr_setup_dmamap;
1810	ccb_cb = amr_setup_ccbmap;
1811    }
1812
1813    /* if the command involves data at all, and hasn't been mapped */
1814    if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1815	if (ac->ac_ccb_data == NULL) {
1816	    /* map the data buffers into bus space and build the s/g list */
1817	    if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1818		amr_setup_data_dmamap, ac, 0) == EINPROGRESS) {
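		/*
		 * The load was deferred; the amr_setup_data_dmamap callback
		 * will finish the mapping and submit the command, so mark the
		 * queue frozen until then (cleared again in amr_complete()).
		 */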
1819		sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1820	    }
1821	} else {
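	    /*
	     * Passthrough commands carry both a data buffer and a CCB; load
	     * the data buffer first without sleeping, then the CCB, whose
	     * callback performs the final sync and submits the command.
	     */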
1822	    if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1823		cb, ac, BUS_DMA_NOWAIT) != 0) {
1824		return (ENOMEM);
1825	    }
1826	    if (bus_dmamap_load(tag, ccbmap, ac->ac_ccb_data,
1827		ac->ac_ccb_length, ccb_cb, ac, 0) == EINPROGRESS) {
1828		sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1829	    }
1830	}
1831    } else {
1832	if (sc->amr_submit_command(ac) == EBUSY) {
1833	    amr_freeslot(ac);
1834	    amr_requeue_ready(ac);
1835	}
1836    }
1837
1838    return (0);
1839}
1840
1841static void
1842amr_unmapcmd(struct amr_command *ac)
1843{
1844    struct amr_softc	*sc = ac->ac_sc;
1845    int			flag;
1846
1847    debug_called(3);
1848
1849    /* if the command involved data at all and was mapped */
1850    if (ac->ac_flags & AMR_CMD_MAPPED) {
1851
1852	if (ac->ac_data != NULL) {
1853
1854	    flag = 0;
1855	    if (ac->ac_flags & AMR_CMD_DATAIN)
1856		flag |= BUS_DMASYNC_POSTREAD;
1857	    if (ac->ac_flags & AMR_CMD_DATAOUT)
1858		flag |= BUS_DMASYNC_POSTWRITE;
1859
1860	    if (AC_IS_SG64(ac)) {
1861		bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map, flag);
1862		bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1863	    } else {
1864		bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, flag);
1865		bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1866	    }
1867	}
1868
1869	if (ac->ac_ccb_data != NULL) {
1870
1871	    flag = 0;
1872	    if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1873		flag |= BUS_DMASYNC_POSTREAD;
1874	    if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1875		flag |= BUS_DMASYNC_POSTWRITE;
1876
1877	    if (AC_IS_SG64(ac)) {
1878		bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_ccb_dma64map,flag);
1879		bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map);
1880	    } else {
1881		bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, flag);
1882		bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap);
1883	    }
1884	}
1885	ac->ac_flags &= ~AMR_CMD_MAPPED;
1886    }
1887}
1888
1889static void
1890amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1891{
1892    struct amr_command *ac = arg;
1893    struct amr_softc *sc = ac->ac_sc;
1894    int flags;
1895
1896    flags = 0;
1897    if (ac->ac_flags & AMR_CMD_DATAIN)
1898	flags |= BUS_DMASYNC_PREREAD;
1899    if (ac->ac_flags & AMR_CMD_DATAOUT)
1900	flags |= BUS_DMASYNC_PREWRITE;
1901
1902    if (AC_IS_SG64(ac)) {
1903	amr_setup_dma64map(arg, segs, nsegs, err);
1904	bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1905    } else {
1906	amr_setup_dmamap(arg, segs, nsegs, err);
1907	bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1908    }
1909    ac->ac_flags |= AMR_CMD_MAPPED;
1910
1911    if (sc->amr_submit_command(ac) == EBUSY) {
1912	amr_freeslot(ac);
1913	amr_requeue_ready(ac);
1914    }
1915}
1916
1917/********************************************************************************
1918 * Take a command and give it to the controller; returns 0 if successful, or
1919 * ENOMEM if resources are short and the command should be retried later.
1920 */
1921static int
1922amr_start(struct amr_command *ac)
1923{
1924    struct amr_softc *sc;
1925    int error = 0;
1926    int slot;
1927
1928    debug_called(3);
1929
1930    /* mark command as busy so that polling consumer can tell */
1931    sc = ac->ac_sc;
1932    ac->ac_flags |= AMR_CMD_BUSY;
1933
1934    /* get a command slot (freed in amr_done) */
1935    slot = ac->ac_slot;
1936    if (sc->amr_busycmd[slot] != NULL)
1937	panic("amr: slot %d busy?\n", slot);
1938    sc->amr_busycmd[slot] = ac;
1939    atomic_add_int(&sc->amr_busyslots, 1);
1940
1941    /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1942    if ((error = amr_mapcmd(ac)) == ENOMEM) {
1943	/*
1944	 * Memroy resources are short, so free the slot and let this be tried
1945	 * Memory resources are short, so free the slot and let this be tried
1946	 */
1947	amr_freeslot(ac);
1948    }
1949
1950    return (error);
1951}
1952
1953/********************************************************************************
1954 * Extract one or more completed commands from the controller (sc)
1955 *
1956 * Returns nonzero if any commands on the work queue were marked as completed.
1957 */
1958
1959int
1960amr_done(struct amr_softc *sc)
1961{
1962    struct amr_command	*ac;
1963    struct amr_mailbox	mbox;
1964    int			i, idx, result;
1965
1966    debug_called(3);
1967
1968    /* See if there's anything for us to do */
1969    result = 0;
1970
1971    /* loop collecting completed commands */
1972    for (;;) {
1973	/* poll for a completed command's identifier and status */
1974	if (sc->amr_get_work(sc, &mbox)) {
1975	    result = 1;
1976
1977	    /* iterate over completed commands in this result */
1978	    for (i = 0; i < mbox.mb_nstatus; i++) {
1979		/* get pointer to busy command */
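		/*
		 * mb_ident was set to ac_slot + 1 on submit (ident 0 is
		 * reserved), so subtract one to recover the slot index.
		 */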
1980		idx = mbox.mb_completed[i] - 1;
1981		ac = sc->amr_busycmd[idx];
1982
1983		/* really a busy command? */
1984		if (ac != NULL) {
1985
1986		    /* pull the command from the busy index */
1987		    amr_freeslot(ac);
1988
1989		    /* save status for later use */
1990		    ac->ac_status = mbox.mb_status;
1991		    amr_enqueue_completed(ac);
1992		    debug(3, "completed command with status %x", mbox.mb_status);
1993		} else {
1994		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1995		}
1996	    }
1997	} else
1998	    break;	/* no work */
1999    }
2000
2001    /* handle completion and timeouts */
2002    amr_complete(sc, 0);
2003
2004    return(result);
2005}
2006
2007/********************************************************************************
2008 * Do completion processing on done commands on (sc)
2009 */
2010
2011static void
2012amr_complete(void *context, int pending)
2013{
2014    struct amr_softc	*sc = (struct amr_softc *)context;
2015    struct amr_command	*ac;
2016
2017    debug_called(3);
2018
2019    /* pull completed commands off the queue */
2020    for (;;) {
2021	ac = amr_dequeue_completed(sc);
2022	if (ac == NULL)
2023	    break;
2024
2025	/* unmap the command's data buffer */
2026	amr_unmapcmd(ac);
2027
2028	/*
2029	 * Is there a completion handler?
2030	 */
2031	if (ac->ac_complete != NULL) {
2032	    /* unbusy the command */
2033	    ac->ac_flags &= ~AMR_CMD_BUSY;
2034	    ac->ac_complete(ac);
2035
2036	    /*
2037	     * Is someone sleeping on this one?
2038	     */
2039	} else {
2040	    mtx_lock(&sc->amr_list_lock);
2041	    ac->ac_flags &= ~AMR_CMD_BUSY;
2042	    if (ac->ac_flags & AMR_CMD_SLEEP) {
2043		/* unbusy the command */
2044		wakeup(ac);
2045	    }
2046	    mtx_unlock(&sc->amr_list_lock);
2047	}
2048
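	/*
	 * If that was the last busy slot, wake any thread sleeping on the
	 * softc (presumably waiting for outstanding commands to drain).
	 */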
2049	if (!sc->amr_busyslots) {
2050	    wakeup(sc);
2051	}
2052    }
2053
2054    mtx_lock(&sc->amr_list_lock);
2055    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
2056    amr_startio(sc);
2057    mtx_unlock(&sc->amr_list_lock);
2058}
2059
2060/********************************************************************************
2061 ********************************************************************************
2062                                                        Command Buffer Management
2063 ********************************************************************************
2064 ********************************************************************************/
2065
2066/********************************************************************************
2067 * Get a new command buffer.
2068 *
2069 * This may return NULL in low-memory cases.
2070 *
2071 * If possible, we recycle a command buffer that's been used before.
2072 */
2073struct amr_command *
2074amr_alloccmd(struct amr_softc *sc)
2075{
2076    struct amr_command	*ac;
2077
2078    debug_called(3);
2079
2080    ac = amr_dequeue_free(sc);
2081    if (ac == NULL) {
2082	amr_alloccmd_cluster(sc);
2083	ac = amr_dequeue_free(sc);
2084    }
2085    if (ac == NULL) {
2086	sc->amr_state |= AMR_STATE_QUEUE_FRZN;
2087	return(NULL);
2088    }
2089
2090    /* clear out significant fields */
2091    ac->ac_status = 0;
2092    bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
2093    ac->ac_flags = 0;
2094    ac->ac_bio = NULL;
2095    ac->ac_data = NULL;
2096    ac->ac_ccb_data = NULL;
2097    ac->ac_complete = NULL;
2098    return(ac);
2099}
2100
2101/********************************************************************************
2102 * Release a command buffer for recycling.
2103 */
2104void
2105amr_releasecmd(struct amr_command *ac)
2106{
2107    debug_called(3);
2108
2109    amr_enqueue_free(ac);
2110}
2111
2112/********************************************************************************
2113 * Allocate a new command cluster and initialise it.
2114 */
2115static void
2116amr_alloccmd_cluster(struct amr_softc *sc)
2117{
2118    struct amr_command_cluster	*acc;
2119    struct amr_command		*ac;
2120    int				i, nextslot;
2121
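    /* don't allocate more slots than the controller can have outstanding */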
2122    if (sc->amr_nextslot > sc->amr_maxio)
2123	return;
2124    acc = malloc(AMR_CMD_CLUSTERSIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
2125    if (acc != NULL) {
2126	nextslot = sc->amr_nextslot;
2127	TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2128	for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2129	    ac = &acc->acc_command[i];
2130	    ac->ac_sc = sc;
2131	    ac->ac_slot = nextslot;
2132
2133	    /*
2134	     * The SG table for each slot is a fixed size and is assumed to
2135	     * hold 64-bit s/g objects when the driver is configured to do
2136	     * 64-bit DMA.  32-bit DMA commands still use the same table, but
2137	     * cast down to 32-bit objects.
2138	     */
2139	    if (AMR_IS_SG64(sc)) {
2140		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2141		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2142	        ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2143	    } else {
2144		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2145		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2146	        ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2147	    }
2148
2149	    if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) ||
2150		bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap) ||
2151		(AMR_IS_SG64(sc) &&
2152		(bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map) ||
2153		bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_ccb_dma64map))))
2154		    break;
2155	    amr_releasecmd(ac);
2156	    if (++nextslot > sc->amr_maxio)
2157		break;
2158	}
2159	sc->amr_nextslot = nextslot;
2160    }
2161}
2162
2163/********************************************************************************
2164 * Free a command cluster
2165 */
2166static void
2167amr_freecmd_cluster(struct amr_command_cluster *acc)
2168{
2169    struct amr_softc	*sc = acc->acc_command[0].ac_sc;
2170    int			i;
2171
2172    for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2173	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2174	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_ccb_dmamap);
2175	if (AMR_IS_SG64(sc)) {
2176		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2177		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_ccb_dma64map);
	}
2178    }
2179    free(acc, M_DEVBUF);
2180}
2181
2182/********************************************************************************
2183 ********************************************************************************
2184                                                         Interface-specific Shims
2185 ********************************************************************************
2186 ********************************************************************************/
2187
2188/********************************************************************************
2189 * Tell the controller that the mailbox contains a valid command
2190 */
2191static int
2192amr_quartz_submit_command(struct amr_command *ac)
2193{
2194    struct amr_softc	*sc = ac->ac_sc;
2195    int			i = 0;
2196
2197    mtx_lock(&sc->amr_hw_lock);
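    /*
     * Give the controller a few microseconds to release the mailbox; if it
     * stays busy, fail with EBUSY so the caller requeues the command.
     */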
2198    while (sc->amr_mailbox->mb_busy && (i++ < 10))
2199        DELAY(1);
2200    if (sc->amr_mailbox->mb_busy) {
2201	mtx_unlock(&sc->amr_hw_lock);
2202	return (EBUSY);
2203    }
2204
2205    /*
2206     * Save the slot number so that we can locate this command when complete.
2207     * Note that ident = 0 seems to be special, so we don't use it.
2208     */
2209    ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
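    /*
     * Copy what appears to be the command portion of the mailbox (the first
     * 14 bytes); the busy/poll/ack handshake bytes that follow are set
     * explicitly below.
     */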
2210    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2211    sc->amr_mailbox->mb_busy = 1;
2212    sc->amr_mailbox->mb_poll = 0;
2213    sc->amr_mailbox->mb_ack  = 0;
2214    sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2215    sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2216
2217    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2218    mtx_unlock(&sc->amr_hw_lock);
2219    return(0);
2220}
2221
2222static int
2223amr_std_submit_command(struct amr_command *ac)
2224{
2225    struct amr_softc	*sc = ac->ac_sc;
2226
2227    mtx_lock(&sc->amr_hw_lock);
2228    if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2229	mtx_unlock(&sc->amr_hw_lock);
2230	return (EBUSY);
2231    }
2232
2233    /*
2234     * Save the slot number so that we can locate this command when complete.
2235     * Note that ident = 0 seems to be special, so we don't use it.
2236     */
2237    ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2238    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2239    sc->amr_mailbox->mb_busy = 1;
2240    sc->amr_mailbox->mb_poll = 0;
2241    sc->amr_mailbox->mb_ack  = 0;
2242
2243    AMR_SPOST_COMMAND(sc);
2244    mtx_unlock(&sc->amr_hw_lock);
2245    return(0);
2246}
2247
2248/********************************************************************************
2249 * Claim any work that the controller has completed; acknowledge completion,
2250 * save details of the completion in (mbsave)
2251 */
2252static int
2253amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2254{
2255    int		worked, i;
2256    u_int32_t	outd;
2257    u_int8_t	nstatus;
2258    u_int8_t	completed[46];
2259
2260    debug_called(3);
2261
2262    worked = 0;
2263
2264    /* work waiting for us? */
2265    if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2266
2267	/* acknowledge interrupt */
2268	AMR_QPUT_ODB(sc, AMR_QODB_READY);
2269
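	/*
	 * The firmware appears to use 0xff as a "not yet written" marker in
	 * nstatus, status and the completion list; poll until real values
	 * appear and write 0xff back so the next completion starts clean.
	 */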
2270	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2271	    DELAY(1);
2272	sc->amr_mailbox->mb_nstatus = 0xff;
2273
2274	/* wait until fw wrote out all completions */
2275	for (i = 0; i < nstatus; i++) {
2276	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2277		DELAY(1);
2278	    sc->amr_mailbox->mb_completed[i] = 0xff;
2279	}
2280
2281	/* Save information for later processing */
2282	mbsave->mb_nstatus = nstatus;
2283	mbsave->mb_status = sc->amr_mailbox->mb_status;
2284	sc->amr_mailbox->mb_status = 0xff;
2285
2286	for (i = 0; i < nstatus; i++)
2287	    mbsave->mb_completed[i] = completed[i];
2288
2289	/* acknowledge that we have the commands */
2290	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2291
2292#if 0
2293#ifndef AMR_QUARTZ_GOFASTER
2294	/*
2295	 * This waits for the controller to notice that we've taken the
2296	 * command from it.  It's very inefficient, and we shouldn't do it,
2297	 * but if we remove this code, we stop completing commands under
2298	 * load.
2299	 *
2300	 * Peter J says we shouldn't do this.  The documentation says we
2301	 * should.  Who is right?
2302	 */
2303	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2304	    ;				/* XXX aiee! what if it dies? */
2305#endif
2306#endif
2307
2308	worked = 1;			/* got some work */
2309    }
2310
2311    return(worked);
2312}
2313
2314static int
2315amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2316{
2317    int		worked;
2318    u_int8_t	istat;
2319
2320    debug_called(3);
2321
2322    worked = 0;
2323
2324    /* check for valid interrupt status */
2325    istat = AMR_SGET_ISTAT(sc);
2326    if ((istat & AMR_SINTR_VALID) != 0) {
2327	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */
2328
2329	/* save mailbox, which contains a list of completed commands */
2330	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2331
2332	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
2333	worked = 1;
2334    }
2335
2336    return(worked);
2337}
2338
2339/********************************************************************************
2340 * Notify the controller of the mailbox location.
2341 */
2342static void
2343amr_std_attach_mailbox(struct amr_softc *sc)
2344{
2345
2346    /* program the mailbox physical address */
2347    AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
2348    AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
2349    AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2350    AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2351    AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2352
2353    /* clear any outstanding interrupt and enable interrupts proper */
2354    AMR_SACK_INTERRUPT(sc);
2355    AMR_SENABLE_INTR(sc);
2356}
2357
2358#ifdef AMR_BOARD_INIT
2359/********************************************************************************
2360 * Initialise the controller
2361 */
2362static int
2363amr_quartz_init(struct amr_softc *sc)
2364{
2365    int		status, ostatus;
2366
2367    device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2368
2369    AMR_QRESET(sc);
2370
2371    ostatus = 0xff;
2372    while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2373	if (status != ostatus) {
2374	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2375	    ostatus = status;
2376	}
2377	switch (status) {
2378	case AMR_QINIT_NOMEM:
2379	    return(ENOMEM);
2380
2381	case AMR_QINIT_SCAN:
2382	    /* XXX we could print channel/target here */
2383	    break;
2384	}
2385    }
2386    return(0);
2387}
2388
2389static int
2390amr_std_init(struct amr_softc *sc)
2391{
2392    int		status, ostatus;
2393
2394    device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2395
2396    AMR_SRESET(sc);
2397
2398    ostatus = 0xff;
2399    while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2400	if (status != ostatus) {
2401	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2402	    ostatus = status;
2403	}
2404	switch (status) {
2405	case AMR_SINIT_NOMEM:
2406	    return(ENOMEM);
2407
2408	case AMR_SINIT_INPROG:
2409	    /* XXX we could print channel/target here? */
2410	    break;
2411	}
2412    }
2413    return(0);
2414}
2415#endif
2416
2417/********************************************************************************
2418 ********************************************************************************
2419                                                                        Debugging
2420 ********************************************************************************
2421 ********************************************************************************/
2422
2423/********************************************************************************
2424 * Identify the controller and print some information about it.
2425 */
2426static void
2427amr_describe_controller(struct amr_softc *sc)
2428{
2429    struct amr_prodinfo	*ap;
2430    struct amr_enquiry	*ae;
2431    char		*prod;
2432    int			status;
2433
2434    /*
2435     * Try to get 40LD product info, which tells us what the card is labelled as.
2436     */
2437    if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2438	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2439		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
2440		      ap->ap_memsize);
2441
2442	free(ap, M_DEVBUF);
2443	return;
2444    }
2445
2446    /*
2447     * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2448     */
2449    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2450	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2451
2452    } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2453
2454	/*
2455	 * Try to work it out based on the PCI signatures.
2456	 */
2457	switch (pci_get_device(sc->amr_dev)) {
2458	case 0x9010:
2459	    prod = "Series 428";
2460	    break;
2461	case 0x9060:
2462	    prod = "Series 434";
2463	    break;
2464	default:
2465	    prod = "unknown controller";
2466	    break;
2467	}
2468    } else {
2469	device_printf(sc->amr_dev, "<unsupported controller>\n");
2470	return;
2471    }
2472
2473    /*
2474     * HP NetRaid controllers have a special encoding of the firmware and
2475     * BIOS versions. The AMI version seems to have it as strings whereas
2476     * the HP version does it with a leading uppercase character and two
2477     * binary numbers.
2478     */
2479
2480    if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2481       ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2482       ae->ae_adapter.aa_firmware[1] <  ' ' &&
2483       ae->ae_adapter.aa_firmware[0] <  ' ' &&
2484       ae->ae_adapter.aa_bios[2] >= 'A'     &&
2485       ae->ae_adapter.aa_bios[2] <= 'Z'     &&
2486       ae->ae_adapter.aa_bios[1] <  ' '     &&
2487       ae->ae_adapter.aa_bios[0] <  ' ') {
2488
2489	/* this looks like we have an HP NetRaid version of the MegaRaid */
2490
2491	if (ae->ae_signature == AMR_SIG_438) {
2492	    /* the AMI 438 is a NetRaid 3si in HP-land */
2493	    prod = "HP NetRaid 3si";
2494	}
2495
2496	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2497		      prod, ae->ae_adapter.aa_firmware[2],
2498		      ae->ae_adapter.aa_firmware[1],
2499		      ae->ae_adapter.aa_firmware[0],
2500		      ae->ae_adapter.aa_bios[2],
2501		      ae->ae_adapter.aa_bios[1],
2502		      ae->ae_adapter.aa_bios[0],
2503		      ae->ae_adapter.aa_memorysize);
2504    } else {
2505	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2506		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2507		      ae->ae_adapter.aa_memorysize);
2508    }
2509    free(ae, M_DEVBUF);
2510}
2511
2512int
2513amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2514{
2515    struct amr_command	*ac;
2516    int			error = EIO;
2517
2518    debug_called(1);
2519
2520    sc->amr_state |= AMR_STATE_INTEN;
2521
2522    /* get ourselves a command buffer */
2523    if ((ac = amr_alloccmd(sc)) == NULL)
2524	goto out;
2525    /* set command flags */
2526    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2527
2528    /* point the command at our data */
2529    ac->ac_data = data;
2530    ac->ac_length = blks * AMR_BLKSIZE;
2531
2532    /* build the command proper */
2533    ac->ac_mailbox.mb_command 	= AMR_CMD_LWRITE;
2534    ac->ac_mailbox.mb_blkcount	= blks;
2535    ac->ac_mailbox.mb_lba	= lba;
2536    ac->ac_mailbox.mb_drive	= unit;
2537
2538    /* can't assume that interrupts are going to work here, so play it safe */
2539    if (sc->amr_poll_command(ac))
2540	goto out;
2541    error = ac->ac_status;
2542
2543 out:
2544    if (ac != NULL)
2545	amr_releasecmd(ac);
2546
2547    sc->amr_state &= ~AMR_STATE_INTEN;
2548    return (error);
2549}
2550
2551
2552
2553#ifdef AMR_DEBUG
2554/********************************************************************************
2555 * Print the command (ac) in human-readable format
2556 */
2557#if 0
2558static void
2559amr_printcommand(struct amr_command *ac)
2560{
2561    struct amr_softc	*sc = ac->ac_sc;
2562    struct amr_sgentry	*sg;
2563    int			i;
2564
2565    device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
2566		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2567    device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
2568		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2569    device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2570    device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
2571		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2572    device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2573
2574    /* get base address of s/g table */
2575    sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2576    for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2577	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
2578}
2579#endif
2580#endif
2581