1/*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 *    notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 *    notice, this list of conditions and the following disclaimer in the
40 *    documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 *    agrees to the disclaimer below and the terms and conditions set forth
43 *    herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD$");
60
61/*
 * Driver for the AMI MegaRAID family of controllers.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/kernel.h>
69#include <sys/proc.h>
70#include <sys/sysctl.h>
71
72#include <sys/bio.h>
73#include <sys/bus.h>
74#include <sys/conf.h>
75#include <sys/stat.h>
76
77#include <machine/bus.h>
78#include <machine/cpu.h>
79#include <machine/resource.h>
80#include <sys/rman.h>
81
82#include <dev/pci/pcireg.h>
83#include <dev/pci/pcivar.h>
84
85#include <dev/amr/amrio.h>
86#include <dev/amr/amrreg.h>
87#include <dev/amr/amrvar.h>
88#define AMR_DEFINE_TABLES
89#include <dev/amr/amr_tables.h>
90
91SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
92
93static d_open_t         amr_open;
94static d_close_t        amr_close;
95static d_ioctl_t        amr_ioctl;
96
97static struct cdevsw amr_cdevsw = {
98	.d_version =	D_VERSION,
99	.d_flags =	D_NEEDGIANT,
100	.d_open =	amr_open,
101	.d_close =	amr_close,
102	.d_ioctl =	amr_ioctl,
103	.d_name =	"amr",
104};
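
#if 0
/*
 * Illustrative only (not compiled): a minimal userland sketch of talking to
 * the control device that amr_attach() creates below as /dev/amr%d.  It
 * assumes unit 0, that amrio.h is reachable on the include path, and uses
 * the AMR_IO_VERSION ioctl handled in amr_ioctl() later in this file, which
 * returns AMR_IO_VERSION_NUMBER in an int.
 */
#include <sys/ioctl.h>
#include <dev/amr/amrio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd, version;

	if ((fd = open("/dev/amr0", O_RDWR)) == -1)
		return (1);
	if (ioctl(fd, AMR_IO_VERSION, &version) == 0)
		printf("amr ioctl interface version %d\n", version);
	close(fd);
	return (0);
}
#endif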
105
106int linux_no_adapter = 0;
107/*
108 * Initialisation, bus interface.
109 */
110static void	amr_startup(void *arg);
111
112/*
113 * Command wrappers
114 */
115static int	amr_query_controller(struct amr_softc *sc);
116static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
117			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
118static void	amr_completeio(struct amr_command *ac);
119static int	amr_support_ext_cdb(struct amr_softc *sc);
120
121/*
122 * Command buffer allocation.
123 */
124static void	amr_alloccmd_cluster(struct amr_softc *sc);
125static void	amr_freecmd_cluster(struct amr_command_cluster *acc);
126
127/*
128 * Command processing.
129 */
130static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
131static int	amr_wait_command(struct amr_command *ac) __unused;
132static int	amr_mapcmd(struct amr_command *ac);
133static void	amr_unmapcmd(struct amr_command *ac);
134static int	amr_start(struct amr_command *ac);
135static void	amr_complete(void *context, ac_qhead_t *head);
136static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
137static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
138static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
139static void	amr_abort_load(struct amr_command *ac);
140
141/*
142 * Status monitoring
143 */
144static void	amr_periodic(void *data);
145
146/*
147 * Interface-specific shims
148 */
149static int	amr_quartz_submit_command(struct amr_command *ac);
150static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
151static int	amr_quartz_poll_command(struct amr_command *ac);
152static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
153
154static int	amr_std_submit_command(struct amr_command *ac);
155static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
156static int	amr_std_poll_command(struct amr_command *ac);
157static void	amr_std_attach_mailbox(struct amr_softc *sc);
158
159#ifdef AMR_BOARD_INIT
160static int	amr_quartz_init(struct amr_softc *sc);
161static int	amr_std_init(struct amr_softc *sc);
162#endif
163
164/*
165 * Debugging
166 */
167static void	amr_describe_controller(struct amr_softc *sc);
168#ifdef AMR_DEBUG
169#if 0
170static void	amr_printcommand(struct amr_command *ac);
171#endif
172#endif
173
174static void	amr_init_sysctl(struct amr_softc *sc);
175static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
176		    int32_t flag, struct thread *td);
177
178static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
179
180/********************************************************************************
181 ********************************************************************************
182                                                                      Inline Glue
183 ********************************************************************************
184 ********************************************************************************/
185
186/********************************************************************************
187 ********************************************************************************
188                                                                Public Interfaces
189 ********************************************************************************
190 ********************************************************************************/
191
192/********************************************************************************
193 * Initialise the controller and softc.
194 */
195int
196amr_attach(struct amr_softc *sc)
197{
198    device_t child;
199
200    debug_called(1);
201
202    /*
203     * Initialise per-controller queues.
204     */
205    amr_init_qhead(&sc->amr_freecmds);
206    amr_init_qhead(&sc->amr_ready);
207    TAILQ_INIT(&sc->amr_cmd_clusters);
208    bioq_init(&sc->amr_bioq);
209
210    debug(2, "queue init done");
211
212    /*
213     * Configure for this controller type.
214     */
215    if (AMR_IS_QUARTZ(sc)) {
216	sc->amr_submit_command = amr_quartz_submit_command;
217	sc->amr_get_work       = amr_quartz_get_work;
218	sc->amr_poll_command   = amr_quartz_poll_command;
219	sc->amr_poll_command1  = amr_quartz_poll_command1;
220    } else {
221	sc->amr_submit_command = amr_std_submit_command;
222	sc->amr_get_work       = amr_std_get_work;
223	sc->amr_poll_command   = amr_std_poll_command;
224	amr_std_attach_mailbox(sc);
225    }
226
227#ifdef AMR_BOARD_INIT
228    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
229	return(ENXIO);
230#endif
231
232    /*
233     * Allocate initial commands.
234     */
235    amr_alloccmd_cluster(sc);
236
237    /*
     * Query the controller for features and limits.
239     */
240    if (amr_query_controller(sc))
241	return(ENXIO);
242
243    debug(2, "controller query complete");
244
245    /*
246     * preallocate the remaining commands.
247     */
248    while (sc->amr_nextslot < sc->amr_maxio)
249	amr_alloccmd_cluster(sc);
250
251    /*
252     * Setup sysctls.
253     */
254    amr_init_sysctl(sc);
255
256    /*
257     * Attach our 'real' SCSI channels to CAM.
258     */
259    child = device_add_child(sc->amr_dev, "amrp", -1);
260    sc->amr_pass = child;
261    if (child != NULL) {
262	device_set_softc(child, sc);
263	device_set_desc(child, "SCSI Passthrough Bus");
264	bus_generic_attach(sc->amr_dev);
265    }
266
267    /*
268     * Create the control device.
269     */
270    sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
271			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
272    sc->amr_dev_t->si_drv1 = sc;
273    linux_no_adapter++;
274    if (device_get_unit(sc->amr_dev) == 0)
275	make_dev_alias(sc->amr_dev_t, "megadev0");
276
277    /*
278     * Schedule ourselves to bring the controller up once interrupts are
279     * available.
280     */
281    bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
282    sc->amr_ich.ich_func = amr_startup;
283    sc->amr_ich.ich_arg = sc;
284    if (config_intrhook_establish(&sc->amr_ich) != 0) {
285	device_printf(sc->amr_dev, "can't establish configuration hook\n");
286	return(ENOMEM);
287    }
288
289    /*
290     * Print a little information about the controller.
291     */
292    amr_describe_controller(sc);
293
294    debug(2, "attach complete");
295    return(0);
296}
297
298/********************************************************************************
299 * Locate disk resources and attach children to them.
300 */
301static void
302amr_startup(void *arg)
303{
304    struct amr_softc	*sc = (struct amr_softc *)arg;
305    struct amr_logdrive	*dr;
306    int			i, error;
307
308    debug_called(1);
309
310    /* pull ourselves off the intrhook chain */
311    if (sc->amr_ich.ich_func)
312	config_intrhook_disestablish(&sc->amr_ich);
313    sc->amr_ich.ich_func = NULL;
314
315    /* get up-to-date drive information */
316    if (amr_query_controller(sc)) {
317	device_printf(sc->amr_dev, "can't scan controller for drives\n");
318	return;
319    }
320
321    /* iterate over available drives */
322    for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
323	/* are we already attached to this drive? */
324	if (dr->al_disk == 0) {
325	    /* generate geometry information */
326	    if (dr->al_size > 0x200000) {	/* extended translation? */
327		dr->al_heads = 255;
328		dr->al_sectors = 63;
329	    } else {
330		dr->al_heads = 64;
331		dr->al_sectors = 32;
332	    }
333	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
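	    /*
	     * Worked example of the translation above (illustrative sizes
	     * only): a 0x300000-sector drive takes the extended geometry,
	     * giving 0x300000 / (255 * 63) = 195 cylinders, while a
	     * 0x100000-sector drive gets 0x100000 / (64 * 32) = 512 cylinders.
	     */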
334
	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
	    if (dr->al_disk == NULL)
		device_printf(sc->amr_dev, "device_add_child failed\n");
	    else
		device_set_ivars(dr->al_disk, dr);
339	}
340    }
341
342    if ((error = bus_generic_attach(sc->amr_dev)) != 0)
343	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
344
345    /* mark controller back up */
346    sc->amr_state &= ~AMR_STATE_SHUTDOWN;
347
348    /* interrupts will be enabled before we do anything more */
349    sc->amr_state |= AMR_STATE_INTEN;
350
351    /*
352     * Start the timeout routine.
353     */
354/*    sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
355
356    return;
357}
358
359static void
360amr_init_sysctl(struct amr_softc *sc)
361{
362
    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
	"Allow volume configuration ioctls (create/delete logical drives)");
    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
	"Next command slot to be allocated");
    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
	"Number of command slots currently in use");
    SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
	SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
	"Maximum number of outstanding commands accepted");
379}
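
/*
 * Example (assuming the usual dev.<driver>.<unit> sysctl convention and
 * unit 0): the ioctl paths below refuse logical drive create/delete
 * requests until the administrator sets
 *
 *	sysctl dev.amr.0.allow_volume_configure=1
 *
 * The remaining nodes are read-only counters intended for diagnostics.
 */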
380
381
382/*******************************************************************************
383 * Free resources associated with a controller instance
384 */
385void
386amr_free(struct amr_softc *sc)
387{
388    struct amr_command_cluster	*acc;
389
390    /* detach from CAM */
391    if (sc->amr_pass != NULL)
392	device_delete_child(sc->amr_dev, sc->amr_pass);
393
394    /* cancel status timeout */
395    untimeout(amr_periodic, sc, sc->amr_timeout);
396
397    /* throw away any command buffers */
398    while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
399	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
400	amr_freecmd_cluster(acc);
401    }
402
403    /* destroy control device */
    if (sc->amr_dev_t != NULL)
	destroy_dev(sc->amr_dev_t);
406
407    if (mtx_initialized(&sc->amr_hw_lock))
408	mtx_destroy(&sc->amr_hw_lock);
409
410    if (mtx_initialized(&sc->amr_list_lock))
411	mtx_destroy(&sc->amr_list_lock);
412}
413
414/*******************************************************************************
415 * Receive a bio structure from a child device and queue it on a particular
416 * disk resource, then poke the disk resource to start as much work as it can.
417 */
418int
419amr_submit_bio(struct amr_softc *sc, struct bio *bio)
420{
421    debug_called(2);
422
423    mtx_lock(&sc->amr_list_lock);
424    amr_enqueue_bio(sc, bio);
425    amr_startio(sc);
426    mtx_unlock(&sc->amr_list_lock);
427    return(0);
428}
429
430/********************************************************************************
431 * Accept an open operation on the control device.
432 */
433static int
434amr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
435{
436    int			unit = dev2unit(dev);
437    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
438
439    debug_called(1);
440
441    sc->amr_state |= AMR_STATE_OPEN;
442    return(0);
443}
444
445#ifdef LSI
446static int
447amr_del_ld(struct amr_softc *sc, int drv_no, int status)
448{
449
450    debug_called(1);
451
452    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
453    sc->amr_state &= ~AMR_STATE_LD_DELETE;
454    sc->amr_state |= AMR_STATE_REMAP_LD;
455    debug(1, "State Set");
456
457    if (!status) {
	debug(1, "disk being destroyed %d", drv_no);
459	if (--amr_disks_registered == 0)
460	    cdevsw_remove(&amrddisk_cdevsw);
	debug(1, "disk destroyed successfully");
462    }
463    return 0;
464}
465
466static int
467amr_prepare_ld_delete(struct amr_softc *sc)
468{
469
470    debug_called(1);
471    if (sc->ld_del_supported == 0)
472	return(ENOIOCTL);
473
474    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
475    sc->amr_state |= AMR_STATE_LD_DELETE;
476
    /* Wait up to one minute for all outstanding commands to be flushed. */
    tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO, "delete_logical_drv", hz * 60 * 1);
    if (sc->amr_busyslots)
480	return(ENOIOCTL);
481
482    return 0;
483}
484#endif
485
486/********************************************************************************
487 * Accept the last close on the control device.
488 */
489static int
490amr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
491{
492    int			unit = dev2unit(dev);
493    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);
494
495    debug_called(1);
496
497    sc->amr_state &= ~AMR_STATE_OPEN;
498    return (0);
499}
500
501/********************************************************************************
502 * Handle controller-specific control operations.
503 */
504static void
505amr_rescan_drives(struct cdev *dev)
506{
507    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
508    int			i, error = 0;
509
510    sc->amr_state |= AMR_STATE_REMAP_LD;
511    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "waiting for controller to become idle\n");
513	amr_done(sc);
514    }
515
516    /* mark ourselves as in-shutdown */
517    sc->amr_state |= AMR_STATE_SHUTDOWN;
518
519    /* flush controller */
520    device_printf(sc->amr_dev, "flushing cache...");
521    printf("%s\n", amr_flush(sc) ? "failed" : "done");
522
523    /* delete all our child devices */
524    for(i = 0 ; i < AMR_MAXLD; i++) {
525	if(sc->amr_drive[i].al_disk != 0) {
526	    if((error = device_delete_child(sc->amr_dev,
527		sc->amr_drive[i].al_disk)) != 0)
528		goto shutdown_out;
529
530	     sc->amr_drive[i].al_disk = 0;
531	}
532    }
533
534shutdown_out:
535    amr_startup(sc);
536}
537
/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transferred to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyway, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.  However, forcing all buffers to a size of 32k
 * causes stalls in the firmware.  Force each command smaller than
 * 64k up to the next power of two, except that commands between 8k
 * and 16k are rounded up to 32k instead of 16k.
 */
554static unsigned long
555amr_ioctl_buffer_length(unsigned long len)
556{
557
558    if (len <= 4 * 1024)
559	return (4 * 1024);
560    if (len <= 8 * 1024)
561	return (8 * 1024);
562    if (len <= 32 * 1024)
563	return (32 * 1024);
564    if (len <= 64 * 1024)
565	return (64 * 1024);
566    return (len);
567}
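
/*
 * Illustrative only: a few sample mappings of the rounding policy above,
 * using hypothetical request sizes.
 *
 *	512	-> 4096
 *	6000	-> 8192
 *	12000	-> 32768	(the 8k-32k bucket skips 16k, as noted above)
 *	70000	-> 70000	(requests over 64k are passed through)
 */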
568
569int
570amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
571    struct thread *td)
572{
573    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
574    struct amr_command		*ac;
575    struct amr_mailbox		*mb;
576    struct amr_linux_ioctl	ali;
577    void			*dp, *temp;
578    int				error;
579    int				adapter, len, ac_flags = 0;
580    int				logical_drives_changed = 0;
581    u_int32_t			linux_version = 0x02100000;
582    u_int8_t			status;
583    struct amr_passthrough	*ap;	/* 60 bytes */
584
585    error = 0;
586    dp = NULL;
587    ac = NULL;
588    ap = NULL;
589
590    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
591	return (error);
592    switch (ali.ui.fcs.opcode) {
593    case 0x82:
594	switch(ali.ui.fcs.subopcode) {
595	case 'e':
596	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
597		sizeof(linux_version));
598	    error = 0;
599	    break;
600
601	case 'm':
602	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
603		sizeof(linux_no_adapter));
604	    td->td_retval[0] = linux_no_adapter;
605	    error = 0;
606	    break;
607
608	default:
609	    printf("Unknown subopcode\n");
610	    error = ENOIOCTL;
611	    break;
612	}
613    break;
614
615    case 0x80:
616    case 0x81:
617	if (ali.ui.fcs.opcode == 0x80)
618	    len = max(ali.outlen, ali.inlen);
619	else
620	    len = ali.ui.fcs.length;
621
622	adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
623
624	mb = (void *)&ali.mbox[0];
625
626	if ((ali.mbox[0] == FC_DEL_LOGDRV  && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
627	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
628	    if (sc->amr_allow_vol_config == 0) {
629		error = EPERM;
630		break;
631	    }
632	    logical_drives_changed = 1;
633	}
634
635	if (ali.mbox[0] == AMR_CMD_PASS) {
636	    mtx_lock(&sc->amr_list_lock);
637	    while ((ac = amr_alloccmd(sc)) == NULL)
638		msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
639	    mtx_unlock(&sc->amr_list_lock);
640	    ap = &ac->ac_ccb->ccb_pthru;
641
642	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
643		sizeof(struct amr_passthrough));
644	    if (error)
645		break;
646
647	    if (ap->ap_data_transfer_length)
648		dp = malloc(ap->ap_data_transfer_length, M_AMR,
649		    M_WAITOK | M_ZERO);
650
651	    if (ali.inlen) {
652		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
653		    dp, ap->ap_data_transfer_length);
654		if (error)
655		    break;
656	    }
657
658	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
659	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
660	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
661	    ac->ac_flags = ac_flags;
662
663	    ac->ac_data = dp;
664	    ac->ac_length = ap->ap_data_transfer_length;
665	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
666
667	    mtx_lock(&sc->amr_list_lock);
668	    error = amr_wait_command(ac);
669	    mtx_unlock(&sc->amr_list_lock);
670	    if (error)
671		break;
672
673	    status = ac->ac_status;
674	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
675	    if (error)
676		break;
677
678	    if (ali.outlen) {
679		error = copyout(dp, temp, ap->ap_data_transfer_length);
680	        if (error)
681		    break;
682	    }
683	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
684	    if (error)
685		break;
686
687	    error = 0;
688	    break;
689	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
690	    printf("No AMR_CMD_PASS_64\n");
691	    error = ENOIOCTL;
692	    break;
693	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
694	    printf("No AMR_CMD_EXTPASS\n");
695	    error = ENOIOCTL;
696	    break;
697	} else {
698	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
699
700	    dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
701
702	    if (ali.inlen) {
703		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
704		if (error)
705		    break;
706	    }
707
708	    mtx_lock(&sc->amr_list_lock);
709	    while ((ac = amr_alloccmd(sc)) == NULL)
710		msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
711
712	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
713	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
714	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
715
716	    ac->ac_length = len;
717	    ac->ac_data = dp;
718	    ac->ac_flags = ac_flags;
719
720	    error = amr_wait_command(ac);
721	    mtx_unlock(&sc->amr_list_lock);
722	    if (error)
723		break;
724
725	    status = ac->ac_status;
726	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
727	    if (ali.outlen) {
728		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
729		if (error)
730		    break;
731	    }
732
733	    error = 0;
734	    if (logical_drives_changed)
735		amr_rescan_drives(dev);
736	    break;
737	}
738	break;
739
740    default:
741	debug(1, "unknown linux ioctl 0x%lx", cmd);
742	printf("unknown linux ioctl 0x%lx\n", cmd);
743	error = ENOIOCTL;
744	break;
745    }
746
    /*
     * Clean up: release the command and free the data buffer if they were
     * allocated.
     */
751    mtx_lock(&sc->amr_list_lock);
752    if (ac != NULL)
753	amr_releasecmd(ac);
754    mtx_unlock(&sc->amr_list_lock);
755    if (dp != NULL)
756	free(dp, M_AMR);
757    return(error);
758}
759
760static int
761amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
762{
763    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
764    union {
765	void			*_p;
766	struct amr_user_ioctl	*au;
767#ifdef AMR_IO_COMMAND32
768	struct amr_user_ioctl32	*au32;
769#endif
770	int			*result;
771    } arg;
772    struct amr_command		*ac;
773    struct amr_mailbox_ioctl	*mbi;
774    void			*dp, *au_buffer;
775    unsigned long		au_length, real_length;
776    unsigned char		*au_cmd;
777    int				*au_statusp, au_direction;
778    int				error;
779    struct amr_passthrough	*ap;	/* 60 bytes */
780    int				logical_drives_changed = 0;
781
782    debug_called(1);
783
784    arg._p = (void *)addr;
785
786    error = 0;
787    dp = NULL;
788    ac = NULL;
789    ap = NULL;
790
791    switch(cmd) {
792
793    case AMR_IO_VERSION:
794	debug(1, "AMR_IO_VERSION");
795	*arg.result = AMR_IO_VERSION_NUMBER;
796	return(0);
797
798#ifdef AMR_IO_COMMAND32
799    /*
     * Accept ioctls from 32-bit binaries on 64-bit platforms such as
     * amd64.  LSI's MEGAMGR utility is the only example known today.  -mi
803     */
804    case AMR_IO_COMMAND32:
805	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
806	au_cmd = arg.au32->au_cmd;
807	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
808	au_length = arg.au32->au_length;
809	au_direction = arg.au32->au_direction;
810	au_statusp = &arg.au32->au_status;
811	break;
812#endif
813
814    case AMR_IO_COMMAND:
815	debug(1, "AMR_IO_COMMAND  0x%x", arg.au->au_cmd[0]);
816	au_cmd = arg.au->au_cmd;
817	au_buffer = (void *)arg.au->au_buffer;
818	au_length = arg.au->au_length;
819	au_direction = arg.au->au_direction;
820	au_statusp = &arg.au->au_status;
821	break;
822
823    case 0xc0046d00:
824    case 0xc06e6d00:	/* Linux emulation */
825	{
826	    devclass_t			devclass;
827	    struct amr_linux_ioctl	ali;
828	    int				adapter, error;
829
830	    devclass = devclass_find("amr");
831	    if (devclass == NULL)
832		return (ENOENT);
833
834	    error = copyin(addr, &ali, sizeof(ali));
835	    if (error)
836		return (error);
837	    if (ali.ui.fcs.opcode == 0x82)
838		adapter = 0;
839	    else
840		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
841
842	    sc = devclass_get_softc(devclass, adapter);
843	    if (sc == NULL)
844		return (ENOENT);
845
846	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, td));
847	}
848    default:
849	debug(1, "unknown ioctl 0x%lx", cmd);
850	return(ENOIOCTL);
851    }
852
853    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
854	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
855	if (sc->amr_allow_vol_config == 0) {
856	    error = EPERM;
857	    goto out;
858	}
859	logical_drives_changed = 1;
860#ifdef LSI
861	if ((error = amr_prepare_ld_delete(sc)) != 0)
862	    return (error);
863#endif
864    }
865
866    /* handle inbound data buffer */
867    real_length = amr_ioctl_buffer_length(au_length);
868    dp = malloc(real_length, M_AMR, M_WAITOK|M_ZERO);
869    if (au_length != 0 && au_cmd[0] != 0x06) {
870	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
871	    free(dp, M_AMR);
872	    return (error);
873	}
874	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
875    }
876
877    /* Allocate this now before the mutex gets held */
878
879    mtx_lock(&sc->amr_list_lock);
880    while ((ac = amr_alloccmd(sc)) == NULL)
881	msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
882
883    /* handle SCSI passthrough command */
884    if (au_cmd[0] == AMR_CMD_PASS) {
885        int len;
886
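	/*
	 * Layout of the user-supplied command bytes decoded below (inferred
	 * from this code rather than from a published interface):
	 *
	 *	au_cmd[0]		AMR_CMD_PASS
	 *	au_cmd[2]		CDB length (len)
	 *	au_cmd[3..len+2]	CDB bytes
	 *	au_cmd[len+3]		timeout (bits 0-2), ARS (bit 3),
	 *				logical drive flag (bit 7)
	 *	au_cmd[len+4]		logical drive number
	 *	au_cmd[len+5]		channel
	 *	au_cmd[len+6]		SCSI target id
	 */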
887	ap = &ac->ac_ccb->ccb_pthru;
888	bzero(ap, sizeof(struct amr_passthrough));
889
890	/* copy cdb */
891        len = au_cmd[2];
892	ap->ap_cdb_length = len;
893	bcopy(au_cmd + 3, ap->ap_cdb, len);
894
895	/* build passthrough */
896	ap->ap_timeout		= au_cmd[len + 3] & 0x07;
897	ap->ap_ars		= (au_cmd[len + 3] & 0x08) ? 1 : 0;
898	ap->ap_islogical		= (au_cmd[len + 3] & 0x80) ? 1 : 0;
899	ap->ap_logical_drive_no	= au_cmd[len + 4];
900	ap->ap_channel		= au_cmd[len + 5];
901	ap->ap_scsi_id 		= au_cmd[len + 6];
902	ap->ap_request_sense_length	= 14;
903	ap->ap_data_transfer_length	= au_length;
904	/* XXX what about the request-sense area? does the caller want it? */
905
906	/* build command */
907	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
908	ac->ac_flags = AMR_CMD_CCB;
909
910    } else {
911	/* direct command to controller */
912	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
913
914	/* copy pertinent mailbox items */
915	mbi->mb_command = au_cmd[0];
916	mbi->mb_channel = au_cmd[1];
917	mbi->mb_param = au_cmd[2];
918	mbi->mb_pad[0] = au_cmd[3];
919	mbi->mb_drive = au_cmd[4];
920	ac->ac_flags = 0;
921    }
922
923    /* build the command */
924    ac->ac_data = dp;
925    ac->ac_length = real_length;
926    ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
927
928    /* run the command */
929    error = amr_wait_command(ac);
930    mtx_unlock(&sc->amr_list_lock);
931    if (error)
932	goto out;
933
934    /* copy out data and set status */
935    if (au_length != 0) {
936	error = copyout(dp, au_buffer, au_length);
937    }
938    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
939    debug(2, "%p status 0x%x", dp, ac->ac_status);
940    *au_statusp = ac->ac_status;
941
942out:
    /*
     * Clean up: release the command and free the data buffer if they were
     * allocated.
     */
947    mtx_lock(&sc->amr_list_lock);
948    if (ac != NULL)
949	amr_releasecmd(ac);
950    mtx_unlock(&sc->amr_list_lock);
951    if (dp != NULL)
952	free(dp, M_AMR);
953
954#ifndef LSI
955    if (logical_drives_changed)
956	amr_rescan_drives(dev);
957#endif
958
959    return(error);
960}
961
962/********************************************************************************
963 ********************************************************************************
964                                                                Status Monitoring
965 ********************************************************************************
966 ********************************************************************************/
967
968/********************************************************************************
969 * Perform a periodic check of the controller status
970 */
971static void
972amr_periodic(void *data)
973{
974    struct amr_softc	*sc = (struct amr_softc *)data;
975
976    debug_called(2);
977
978    /* XXX perform periodic status checks here */
979
980    /* compensate for missed interrupts */
981    amr_done(sc);
982
983    /* reschedule */
984    sc->amr_timeout = timeout(amr_periodic, sc, hz);
985}
986
987/********************************************************************************
988 ********************************************************************************
989                                                                 Command Wrappers
990 ********************************************************************************
991 ********************************************************************************/
992
993/********************************************************************************
994 * Interrogate the controller for the operational parameters we require.
995 */
996static int
997amr_query_controller(struct amr_softc *sc)
998{
999    struct amr_enquiry3	*aex;
1000    struct amr_prodinfo	*ap;
1001    struct amr_enquiry	*ae;
1002    int			ldrv;
1003    int			status;
1004
1005    /*
1006     * Greater than 10 byte cdb support
1007     */
1008    sc->support_ext_cdb = amr_support_ext_cdb(sc);
1009
1010    if(sc->support_ext_cdb) {
1011	debug(2,"supports extended CDBs.");
1012    }
1013
1014    /*
1015     * Try to issue an ENQUIRY3 command
1016     */
1017    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
1018			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
1019
1020	/*
1021	 * Fetch current state of logical drives.
1022	 */
1023	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
1024	    sc->amr_drive[ldrv].al_size       = aex->ae_drivesize[ldrv];
1025	    sc->amr_drive[ldrv].al_state      = aex->ae_drivestate[ldrv];
1026	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
1027	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1028		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1029	}
1030	free(aex, M_AMR);
1031
1032	/*
1033	 * Get product info for channel count.
1034	 */
1035	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1036	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
1037	    return(1);
1038	}
1039	sc->amr_maxdrives = 40;
1040	sc->amr_maxchan = ap->ap_nschan;
1041	sc->amr_maxio = ap->ap_maxio;
1042	sc->amr_type |= AMR_TYPE_40LD;
1043	free(ap, M_AMR);
1044
1045	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1046	if (ap != NULL)
1047	    free(ap, M_AMR);
1048	if (!status) {
1049	    sc->amr_ld_del_supported = 1;
1050	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1051	}
1052    } else {
1053
1054	/* failed, try the 8LD ENQUIRY commands */
1055	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1056	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1057		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1058		return(1);
1059	    }
1060	    ae->ae_signature = 0;
1061	}
1062
1063	/*
1064	 * Fetch current state of logical drives.
1065	 */
1066	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1067	    sc->amr_drive[ldrv].al_size       = ae->ae_ldrv.al_size[ldrv];
1068	    sc->amr_drive[ldrv].al_state      = ae->ae_ldrv.al_state[ldrv];
1069	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1070	    debug(2, "  drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1071		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1072	}
1073
1074	sc->amr_maxdrives = 8;
1075	sc->amr_maxchan = ae->ae_adapter.aa_channels;
1076	sc->amr_maxio = ae->ae_adapter.aa_maxio;
1077	free(ae, M_AMR);
1078    }
1079
1080    /*
1081     * Mark remaining drives as unused.
1082     */
1083    for (; ldrv < AMR_MAXLD; ldrv++)
1084	sc->amr_drive[ldrv].al_size = 0xffffffff;
1085
1086    /*
1087     * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
1088     * the controller's reported value, and lockups have been seen when we do.
1089     */
1090    sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1091
1092    return(0);
1093}
1094
1095/********************************************************************************
1096 * Run a generic enquiry-style command.
1097 */
1098static void *
1099amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1100{
1101    struct amr_command	*ac;
1102    void		*result;
1103    u_int8_t		*mbox;
1104    int			error;
1105
1106    debug_called(1);
1107
1108    error = 1;
1109    result = NULL;
1110
1111    /* get ourselves a command buffer */
1112    mtx_lock(&sc->amr_list_lock);
1113    ac = amr_alloccmd(sc);
1114    mtx_unlock(&sc->amr_list_lock);
1115    if (ac == NULL)
1116	goto out;
1117    /* allocate the response structure */
1118    if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1119	goto out;
1120    /* set command flags */
1121
1122    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1123
1124    /* point the command at our data */
1125    ac->ac_data = result;
1126    ac->ac_length = bufsize;
1127
1128    /* build the command proper */
1129    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1130    mbox[0] = cmd;
1131    mbox[2] = cmdsub;
1132    mbox[3] = cmdqual;
1133    *status = 0;
1134
1135    /* can't assume that interrupts are going to work here, so play it safe */
1136    if (sc->amr_poll_command(ac))
1137	goto out;
1138    error = ac->ac_status;
1139    *status = ac->ac_status;
1140
1141 out:
1142    mtx_lock(&sc->amr_list_lock);
1143    if (ac != NULL)
1144	amr_releasecmd(ac);
1145    mtx_unlock(&sc->amr_list_lock);
1146    if ((error != 0) && (result != NULL)) {
1147	free(result, M_AMR);
1148	result = NULL;
1149    }
1150    return(result);
1151}
1152
1153/********************************************************************************
1154 * Flush the controller's internal cache, return status.
1155 */
1156int
1157amr_flush(struct amr_softc *sc)
1158{
1159    struct amr_command	*ac;
1160    int			error;
1161
1162    /* get ourselves a command buffer */
1163    error = 1;
1164    mtx_lock(&sc->amr_list_lock);
1165    ac = amr_alloccmd(sc);
1166    mtx_unlock(&sc->amr_list_lock);
1167    if (ac == NULL)
1168	goto out;
1169    /* set command flags */
1170    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1171
1172    /* build the command proper */
1173    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1174
1175    /* we have to poll, as the system may be going down or otherwise damaged */
1176    if (sc->amr_poll_command(ac))
1177	goto out;
1178    error = ac->ac_status;
1179
1180 out:
1181    mtx_lock(&sc->amr_list_lock);
1182    if (ac != NULL)
1183	amr_releasecmd(ac);
1184    mtx_unlock(&sc->amr_list_lock);
1185    return(error);
1186}
1187
1188/********************************************************************************
 * Detect support for extended (greater than 10 byte) CDBs.
 * Returns 1 if the controller supports them, 0 if it does not.
1192 */
1193static int
1194amr_support_ext_cdb(struct amr_softc *sc)
1195{
1196    struct amr_command	*ac;
1197    u_int8_t		*mbox;
1198    int			error;
1199
1200    /* get ourselves a command buffer */
1201    error = 0;
1202    mtx_lock(&sc->amr_list_lock);
1203    ac = amr_alloccmd(sc);
1204    mtx_unlock(&sc->amr_list_lock);
1205    if (ac == NULL)
1206	goto out;
1207    /* set command flags */
1208    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1209
1210    /* build the command proper */
1211    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
1212    mbox[0] = 0xA4;
1213    mbox[2] = 0x16;
1214
1215
1216    /* we have to poll, as the system may be going down or otherwise damaged */
1217    if (sc->amr_poll_command(ac))
1218	goto out;
1219    if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1220	    error = 1;
1221    }
1222
1223out:
1224    mtx_lock(&sc->amr_list_lock);
1225    if (ac != NULL)
1226	amr_releasecmd(ac);
1227    mtx_unlock(&sc->amr_list_lock);
1228    return(error);
1229}
1230
1231/********************************************************************************
1232 * Try to find I/O work for the controller from one or more of the work queues.
1233 *
1234 * We make the assumption that if the controller is not ready to take a command
1235 * at some given time, it will generate an interrupt at some later time when
1236 * it is.
1237 */
1238void
1239amr_startio(struct amr_softc *sc)
1240{
1241    struct amr_command	*ac;
1242
1243    /* spin until something prevents us from doing any work */
1244    for (;;) {
1245
	/* Don't bother to queue commands if no bounce buffers are available. */
1247	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1248	    break;
1249
1250	/* try to get a ready command */
1251	ac = amr_dequeue_ready(sc);
1252
1253	/* if that failed, build a command from a bio */
1254	if (ac == NULL)
1255	    (void)amr_bio_command(sc, &ac);
1256
1257	/* if that failed, build a command from a ccb */
1258	if ((ac == NULL) && (sc->amr_cam_command != NULL))
1259	    sc->amr_cam_command(sc, &ac);
1260
1261	/* if we don't have anything to do, give up */
1262	if (ac == NULL)
1263	    break;
1264
1265	/* try to give the command to the controller; if this fails save it for later and give up */
1266	if (amr_start(ac)) {
1267	    debug(2, "controller busy, command deferred");
1268	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
1269	    break;
1270	}
1271    }
1272}
1273
1274/********************************************************************************
1275 * Handle completion of an I/O command.
1276 */
1277static void
1278amr_completeio(struct amr_command *ac)
1279{
1280    struct amrd_softc		*sc = ac->ac_bio->bio_disk->d_drv1;
1281    static struct timeval	lastfail;
1282    static int			curfail;
1283
1284    if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
1285	ac->ac_bio->bio_error = EIO;
1286	ac->ac_bio->bio_flags |= BIO_ERROR;
1287
1288	if (ppsratecheck(&lastfail, &curfail, 1))
1289	    device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1290/*	amr_printcommand(ac);*/
1291    }
1292    amrd_intr(ac->ac_bio);
1293    mtx_lock(&ac->ac_sc->amr_list_lock);
1294    amr_releasecmd(ac);
1295    mtx_unlock(&ac->ac_sc->amr_list_lock);
1296}
1297
1298/********************************************************************************
1299 ********************************************************************************
1300                                                               Command Processing
1301 ********************************************************************************
1302 ********************************************************************************/
1303
1304/********************************************************************************
1305 * Convert a bio off the top of the bio queue into a command.
1306 */
1307static int
1308amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1309{
1310    struct amr_command	*ac;
1311    struct amrd_softc	*amrd;
1312    struct bio		*bio;
1313    int			error;
1314    int			blkcount;
1315    int			driveno;
1316    int			cmd;
1317
1318    ac = NULL;
1319    error = 0;
1320
1321    /* get a command */
1322    if ((ac = amr_alloccmd(sc)) == NULL)
1323	return (ENOMEM);
1324
1325    /* get a bio to work on */
1326    if ((bio = amr_dequeue_bio(sc)) == NULL) {
1327	amr_releasecmd(ac);
1328	return (0);
1329    }
1330
1331    /* connect the bio to the command */
1332    ac->ac_complete = amr_completeio;
1333    ac->ac_bio = bio;
1334    ac->ac_data = bio->bio_data;
1335    ac->ac_length = bio->bio_bcount;
1336    cmd = 0;
1337    switch (bio->bio_cmd) {
1338    case BIO_READ:
1339	ac->ac_flags |= AMR_CMD_DATAIN;
1340	if (AMR_IS_SG64(sc)) {
1341	    cmd = AMR_CMD_LREAD64;
1342	    ac->ac_flags |= AMR_CMD_SG64;
1343	} else
1344	    cmd = AMR_CMD_LREAD;
1345	break;
1346    case BIO_WRITE:
1347	ac->ac_flags |= AMR_CMD_DATAOUT;
1348	if (AMR_IS_SG64(sc)) {
1349	    cmd = AMR_CMD_LWRITE64;
1350	    ac->ac_flags |= AMR_CMD_SG64;
1351	} else
1352	    cmd = AMR_CMD_LWRITE;
1353	break;
1354    case BIO_FLUSH:
1355	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1356	cmd = AMR_CMD_FLUSH;
1357	break;
1358    }
1359    amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1360    driveno = amrd->amrd_drive - sc->amr_drive;
1361    blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1362
1363    ac->ac_mailbox.mb_command = cmd;
1364    if (bio->bio_cmd & (BIO_READ|BIO_WRITE)) {
1365	ac->ac_mailbox.mb_blkcount = blkcount;
1366	ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1367	if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1368	    device_printf(sc->amr_dev,
1369			  "I/O beyond end of unit (%lld,%d > %lu)\n",
1370			  (long long)bio->bio_pblkno, blkcount,
1371			  (u_long)sc->amr_drive[driveno].al_size);
1372	}
1373    }
1374    ac->ac_mailbox.mb_drive = driveno;
1375    if (sc->amr_state & AMR_STATE_REMAP_LD)
1376	ac->ac_mailbox.mb_drive |= 0x80;
1377
1378    /* we fill in the s/g related data when the command is mapped */
1379
1380
1381    *acp = ac;
1382    return(error);
1383}
1384
1385/********************************************************************************
1386 * Take a command, submit it to the controller and sleep until it completes
1387 * or fails.  Interrupts must be enabled, returns nonzero on error.
1388 */
1389static int
1390amr_wait_command(struct amr_command *ac)
1391{
1392    int			error = 0;
1393    struct amr_softc	*sc = ac->ac_sc;
1394
1395    debug_called(1);
1396
1397    ac->ac_complete = NULL;
1398    ac->ac_flags |= AMR_CMD_SLEEP;
1399    if ((error = amr_start(ac)) != 0) {
1400	return(error);
1401    }
1402
1403    while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1404	error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1405    }
1406
1407    return(error);
1408}
1409
1410/********************************************************************************
1411 * Take a command, submit it to the controller and busy-wait for it to return.
1412 * Returns nonzero on error.  Can be safely called with interrupts enabled.
1413 */
1414static int
1415amr_std_poll_command(struct amr_command *ac)
1416{
1417    struct amr_softc	*sc = ac->ac_sc;
1418    int			error, count;
1419
1420    debug_called(2);
1421
1422    ac->ac_complete = NULL;
1423    if ((error = amr_start(ac)) != 0)
1424	return(error);
1425
1426    count = 0;
1427    do {
	/*
	 * Poll for completion, although the interrupt handler may beat us to it.
	 * Note that the timeout here is somewhat arbitrary: roughly one second,
	 * from 1000 passes through a 1ms DELAY.
	 */
1432	amr_done(sc);
1433	DELAY(1000);
1434    } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1435    if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1436	error = 0;
1437    } else {
1438	/* XXX the slot is now marked permanently busy */
1439	error = EIO;
1440	device_printf(sc->amr_dev, "polled command timeout\n");
1441    }
1442    return(error);
1443}
1444
1445static void
1446amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1447{
1448    struct amr_command *ac = arg;
1449    struct amr_softc *sc = ac->ac_sc;
1450    int mb_channel;
1451
1452    if (err) {
	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1454	ac->ac_status = AMR_STATUS_ABORTED;
1455	return;
1456    }
1457
1458    amr_setup_sg(arg, segs, nsegs, err);
1459
1460    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1461    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1462    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1463        ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1464        (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1465	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1466
1467    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1468    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1469    if (AC_IS_SG64(ac)) {
1470	ac->ac_sg64_hi = 0;
1471	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1472    }
1473
1474    sc->amr_poll_command1(sc, ac);
1475}
1476
1477/********************************************************************************
1478 * Take a command, submit it to the controller and busy-wait for it to return.
1479 * Returns nonzero on error.  Can be safely called with interrupts enabled.
1480 */
1481static int
1482amr_quartz_poll_command(struct amr_command *ac)
1483{
1484    struct amr_softc	*sc = ac->ac_sc;
1485    int			error;
1486
1487    debug_called(2);
1488
1489    error = 0;
1490
1491    if (AC_IS_SG64(ac)) {
1492	ac->ac_tag = sc->amr_buffer64_dmat;
1493	ac->ac_datamap = ac->ac_dma64map;
1494    } else {
1495	ac->ac_tag = sc->amr_buffer_dmat;
1496	ac->ac_datamap = ac->ac_dmamap;
1497    }
1498
1499    /* now we have a slot, we can map the command (unmapped in amr_complete) */
1500    if (ac->ac_data != 0) {
1501	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1502	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1503	    error = 1;
1504	}
1505    } else {
1506	error = amr_quartz_poll_command1(sc, ac);
1507    }
1508
1509    return (error);
1510}
1511
1512static int
1513amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1514{
1515    int count, error;
1516
1517    mtx_lock(&sc->amr_hw_lock);
1518    if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1519	count=0;
1520	while (sc->amr_busyslots) {
1521	    msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1522	    if(count++>10) {
1523		break;
1524	    }
1525	}
1526
1527	if(sc->amr_busyslots) {
1528	    device_printf(sc->amr_dev, "adapter is busy\n");
1529	    mtx_unlock(&sc->amr_hw_lock);
1530	    if (ac->ac_data != NULL) {
1531		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1532	    }
1533    	    ac->ac_status=0;
1534	    return(1);
1535	}
1536    }
1537
1538    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1539
    /* prime the mailbox: sentinel status values, poll/ack cleared, busy set */
1541    sc->amr_mailbox->mb_ident = 0xFE;
1542    sc->amr_mailbox->mb_nstatus = 0xFF;
1543    sc->amr_mailbox->mb_status = 0xFF;
1544    sc->amr_mailbox->mb_poll = 0;
1545    sc->amr_mailbox->mb_ack = 0;
1546    sc->amr_mailbox->mb_busy = 1;
1547
1548    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
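    /*
     * Busy-wait for the firmware to post a status (it overwrites the 0xFF
     * sentinels set above), then complete the poll/ack handshake with the
     * 0x77 token the firmware expects, as the loops below do.
     */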
1549
1550    while(sc->amr_mailbox->mb_nstatus == 0xFF)
1551	DELAY(1);
1552    while(sc->amr_mailbox->mb_status == 0xFF)
1553	DELAY(1);
1554    ac->ac_status=sc->amr_mailbox->mb_status;
1555    error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1556    while(sc->amr_mailbox->mb_poll != 0x77)
1557	DELAY(1);
1558    sc->amr_mailbox->mb_poll = 0;
1559    sc->amr_mailbox->mb_ack = 0x77;
1560
1561    /* acknowledge that we have the commands */
1562    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1563    while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1564	DELAY(1);
1565    mtx_unlock(&sc->amr_hw_lock);
1566
1567    /* unmap the command's data buffer */
1568    if (ac->ac_flags & AMR_CMD_DATAIN) {
1569	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1570    }
1571    if (ac->ac_flags & AMR_CMD_DATAOUT) {
1572	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1573    }
1574    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1575
1576    return(error);
1577}
1578
1579static __inline int
1580amr_freeslot(struct amr_command *ac)
1581{
1582    struct amr_softc *sc = ac->ac_sc;
1583    int			slot;
1584
1585    debug_called(3);
1586
1587    slot = ac->ac_slot;
1588    if (sc->amr_busycmd[slot] == NULL)
1589	panic("amr: slot %d not busy?\n", slot);
1590
1591    sc->amr_busycmd[slot] = NULL;
1592    atomic_subtract_int(&sc->amr_busyslots, 1);
1593
1594    return (0);
1595}
1596
1597/********************************************************************************
1598 * Map/unmap (ac)'s data in the controller's addressable space as required.
1599 *
1600 * These functions may be safely called multiple times on a given command.
1601 */
1602static void
1603amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1604{
1605    struct amr_command	*ac = (struct amr_command *)arg;
1606    struct amr_sgentry	*sg;
1607    struct amr_sg64entry *sg64;
1608    int flags, i;
1609
1610    debug_called(3);
1611
1612    /* get base address of s/g table */
1613    sg = ac->ac_sg.sg32;
1614    sg64 = ac->ac_sg.sg64;
1615
1616    if (AC_IS_SG64(ac)) {
1617	ac->ac_nsegments = nsegments;
1618	ac->ac_mb_physaddr = 0xffffffff;
1619	for (i = 0; i < nsegments; i++, sg64++) {
1620	    sg64->sg_addr = segs[i].ds_addr;
1621	    sg64->sg_count = segs[i].ds_len;
1622	}
1623    } else {
1624	/* decide whether we need to populate the s/g table */
1625	if (nsegments < 2) {
1626	    ac->ac_nsegments = 0;
1627	    ac->ac_mb_physaddr = segs[0].ds_addr;
1628	} else {
1629            ac->ac_nsegments = nsegments;
1630	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1631	    for (i = 0; i < nsegments; i++, sg++) {
1632		sg->sg_addr = segs[i].ds_addr;
1633		sg->sg_count = segs[i].ds_len;
1634	    }
1635	}
1636    }
1637
1638    flags = 0;
1639    if (ac->ac_flags & AMR_CMD_DATAIN)
1640	flags |= BUS_DMASYNC_PREREAD;
1641    if (ac->ac_flags & AMR_CMD_DATAOUT)
1642	flags |= BUS_DMASYNC_PREWRITE;
1643    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1644    ac->ac_flags |= AMR_CMD_MAPPED;
1645}
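
/*
 * Illustrative outcomes of the mapping above (hypothetical segment lists,
 * 32-bit case): a single segment leaves the s/g table unused, so
 * ac_nsegments stays 0 and the segment address goes straight into
 * ac_mb_physaddr; two or more segments fill sg32[] and point ac_mb_physaddr
 * at ac_sgbusaddr instead.
 */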
1646
1647static void
1648amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1649{
1650    struct amr_command *ac = arg;
1651    struct amr_softc *sc = ac->ac_sc;
1652    int mb_channel;
1653
1654    if (err) {
	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1656	amr_abort_load(ac);
1657	return;
1658    }
1659
1660    amr_setup_sg(arg, segs, nsegs, err);
1661
1662    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1663    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1664    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1665        ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1666        (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1667	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1668
1669    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1670    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1671    if (AC_IS_SG64(ac)) {
1672	ac->ac_sg64_hi = 0;
1673	ac->ac_sg64_lo = ac->ac_sgbusaddr;
1674    }
1675
1676    if (sc->amr_submit_command(ac) == EBUSY) {
1677	amr_freeslot(ac);
1678	amr_requeue_ready(ac);
1679    }
1680}
1681
1682static void
1683amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1684{
1685    struct amr_command *ac = arg;
1686    struct amr_softc *sc = ac->ac_sc;
1687    struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1688    struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1689
1690    if (err) {
	device_printf(sc->amr_dev, "error %d in %s\n", err, __FUNCTION__);
1692	amr_abort_load(ac);
1693	return;
1694    }
1695
1696    /* Set up the mailbox portion of the command to point at the ccb */
1697    ac->ac_mailbox.mb_nsgelem = 0;
1698    ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1699
1700    amr_setup_sg(arg, segs, nsegs, err);
1701
1702    switch (ac->ac_mailbox.mb_command) {
1703    case AMR_CMD_EXTPASS:
1704	aep->ap_no_sg_elements = ac->ac_nsegments;
1705	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1706        break;
1707    case AMR_CMD_PASS:
1708	ap->ap_no_sg_elements = ac->ac_nsegments;
1709	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1710	break;
1711    default:
1712	panic("Unknown ccb command");
1713    }
1714
1715    if (sc->amr_submit_command(ac) == EBUSY) {
1716	amr_freeslot(ac);
1717	amr_requeue_ready(ac);
1718    }
1719}
1720
1721static int
1722amr_mapcmd(struct amr_command *ac)
1723{
1724    bus_dmamap_callback_t *cb;
1725    struct amr_softc	*sc = ac->ac_sc;
1726
1727    debug_called(3);
1728
1729    if (AC_IS_SG64(ac)) {
1730	ac->ac_tag = sc->amr_buffer64_dmat;
1731	ac->ac_datamap = ac->ac_dma64map;
1732    } else {
1733	ac->ac_tag = sc->amr_buffer_dmat;
1734	ac->ac_datamap = ac->ac_dmamap;
1735    }
1736
1737    if (ac->ac_flags & AMR_CMD_CCB)
1738	cb = amr_setup_ccb;
1739    else
1740	cb = amr_setup_data;
1741
1742    /* if the command involves data at all, and hasn't been mapped */
1743    if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1744	/* map the data buffers into bus space and build the s/g list */
1745	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1746	     ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1747	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1748	}
1749   } else {
1750    	if (sc->amr_submit_command(ac) == EBUSY) {
1751	    amr_freeslot(ac);
1752	    amr_requeue_ready(ac);
1753	}
1754   }
1755
1756    return (0);
1757}
1758
1759static void
1760amr_unmapcmd(struct amr_command *ac)
1761{
1762    int			flag;
1763
1764    debug_called(3);
1765
1766    /* if the command involved data at all and was mapped */
1767    if (ac->ac_flags & AMR_CMD_MAPPED) {
1768
1769	if (ac->ac_data != NULL) {
1770
1771	    flag = 0;
1772	    if (ac->ac_flags & AMR_CMD_DATAIN)
1773		flag |= BUS_DMASYNC_POSTREAD;
1774	    if (ac->ac_flags & AMR_CMD_DATAOUT)
1775		flag |= BUS_DMASYNC_POSTWRITE;
1776
1777	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1778	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1779	}
1780
1781	ac->ac_flags &= ~AMR_CMD_MAPPED;
1782    }
1783}
1784
1785static void
1786amr_abort_load(struct amr_command *ac)
1787{
1788    ac_qhead_t head;
1789    struct amr_softc *sc = ac->ac_sc;
1790
1791    mtx_assert(&sc->amr_list_lock, MA_OWNED);
1792
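    /* fake an ABORTED completion and push it through the normal completion path */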
1793    ac->ac_status = AMR_STATUS_ABORTED;
1794    amr_init_qhead(&head);
1795    amr_enqueue_completed(ac, &head);
1796
1797    mtx_unlock(&sc->amr_list_lock);
1798    amr_complete(sc, &head);
1799    mtx_lock(&sc->amr_list_lock);
1800}
1801
1802/********************************************************************************
1803 * Take a command and give it to the controller.  Returns 0 if successful, or
1804 * EBUSY if the command should be retried later.
1805 */
1806static int
1807amr_start(struct amr_command *ac)
1808{
1809    struct amr_softc *sc;
1810    int error = 0;
1811    int slot;
1812
1813    debug_called(3);
1814
1815    /* mark command as busy so that polling consumer can tell */
1816    sc = ac->ac_sc;
1817    ac->ac_flags |= AMR_CMD_BUSY;
1818
1819    /* get a command slot (freed in amr_done) */
1820    slot = ac->ac_slot;
1821    if (sc->amr_busycmd[slot] != NULL)
1822	panic("amr: slot %d busy?\n", slot);
1823    sc->amr_busycmd[slot] = ac;
1824    atomic_add_int(&sc->amr_busyslots, 1);
1825
1826    /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1827    if ((error = amr_mapcmd(ac)) == ENOMEM) {
1828	/*
1829	 * Memory resources are short, so free the slot and let this be tried
1830	 * later.
1831	 */
1832	amr_freeslot(ac);
1833    }
1834
1835    return (error);
1836}
1837
1838/********************************************************************************
1839 * Extract one or more completed commands from the controller (sc)
1840 *
1841 * Returns nonzero if any commands on the work queue were marked as completed.
1842 */
1843
1844int
1845amr_done(struct amr_softc *sc)
1846{
1847    ac_qhead_t		head;
1848    struct amr_command	*ac;
1849    struct amr_mailbox	mbox;
1850    int			i, idx, result;
1851
1852    debug_called(3);
1853
1854    /* See if there's anything for us to do */
1855    result = 0;
1856    amr_init_qhead(&head);
1857
1858    /* loop collecting completed commands */
1859    for (;;) {
1860	/* poll for a completed command's identifier and status */
1861	if (sc->amr_get_work(sc, &mbox)) {
1862	    result = 1;
1863
1864	    /* iterate over completed commands in this result */
1865	    for (i = 0; i < mbox.mb_nstatus; i++) {
1866		/* get pointer to busy command */
1867		idx = mbox.mb_completed[i] - 1;
1868		ac = sc->amr_busycmd[idx];
1869
1870		/* really a busy command? */
1871		if (ac != NULL) {
1872
1873		    /* pull the command from the busy index */
1874		    amr_freeslot(ac);
1875
1876		    /* save status for later use */
1877		    ac->ac_status = mbox.mb_status;
1878		    amr_enqueue_completed(ac, &head);
1879		    debug(3, "completed command with status %x", mbox.mb_status);
1880		} else {
1881		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1882		}
1883	    }
1884	} else
1885	    break;	/* no work */
1886    }
1887
1888    /* handle completion and timeouts */
1889    amr_complete(sc, &head);
1890
1891    return(result);
1892}
1893
1894/********************************************************************************
1895 * Do completion processing on done commands on (sc)
1896 */
1897
1898static void
1899amr_complete(void *context, ac_qhead_t *head)
1900{
1901    struct amr_softc	*sc = (struct amr_softc *)context;
1902    struct amr_command	*ac;
1903
1904    debug_called(3);
1905
1906    /* pull completed commands off the queue */
1907    for (;;) {
1908	ac = amr_dequeue_completed(sc, head);
1909	if (ac == NULL)
1910	    break;
1911
1912	/* unmap the command's data buffer */
1913	amr_unmapcmd(ac);
1914
1915	/*
1916	 * Is there a completion handler?
1917	 */
1918	if (ac->ac_complete != NULL) {
1919	    /* unbusy the command */
1920	    ac->ac_flags &= ~AMR_CMD_BUSY;
1921	    ac->ac_complete(ac);
1922
1923	    /*
1924	     * Is someone sleeping on this one?
1925	     */
1926	} else {
1927	    mtx_lock(&sc->amr_list_lock);
1928	    ac->ac_flags &= ~AMR_CMD_BUSY;
1929	    if (ac->ac_flags & AMR_CMD_SLEEP) {
1930		/* unbusy the command */
1931		wakeup(ac);
1932	    }
1933	    mtx_unlock(&sc->amr_list_lock);
1934	}
1935
1936	if (!sc->amr_busyslots) {
1937	    wakeup(sc);
1938	}
1939    }
1940
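    /* completions have freed slots, so unfreeze the queue and restart any deferred I/O */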
1941    mtx_lock(&sc->amr_list_lock);
1942    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1943    amr_startio(sc);
1944    mtx_unlock(&sc->amr_list_lock);
1945}
1946
1947/********************************************************************************
1948 ********************************************************************************
1949                                                        Command Buffer Management
1950 ********************************************************************************
1951 ********************************************************************************/
1952
1953/********************************************************************************
1954 * Get a new command buffer.
1955 *
1956 * This may return NULL if no free command buffers are available.
1957 *
1958 * If possible, we recycle a command buffer that's been used before.
1959 */
1960struct amr_command *
1961amr_alloccmd(struct amr_softc *sc)
1962{
1963    struct amr_command	*ac;
1964
1965    debug_called(3);
1966
1967    ac = amr_dequeue_free(sc);
1968    if (ac == NULL) {
1969	sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1970	return(NULL);
1971    }
1972
1973    /* clear out significant fields */
1974    ac->ac_status = 0;
1975    bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1976    ac->ac_flags = 0;
1977    ac->ac_bio = NULL;
1978    ac->ac_data = NULL;
1979    ac->ac_complete = NULL;
1980    ac->ac_retries = 0;
1981    ac->ac_tag = NULL;
1982    ac->ac_datamap = NULL;
1983    return(ac);
1984}
1985
1986/********************************************************************************
1987 * Release a command buffer for recycling.
1988 */
1989void
1990amr_releasecmd(struct amr_command *ac)
1991{
1992    debug_called(3);
1993
1994    amr_enqueue_free(ac);
1995}
1996
1997/********************************************************************************
1998 * Allocate a new command cluster and initialise it.
1999 */
2000static void
2001amr_alloccmd_cluster(struct amr_softc *sc)
2002{
2003    struct amr_command_cluster	*acc;
2004    struct amr_command		*ac;
2005    int				i, nextslot;
2006
2007    /*
2008     * If we haven't found the real limit yet, let us have a couple of
2009     * commands in order to be able to probe.
2010     */
2011    if (sc->amr_maxio == 0)
2012	sc->amr_maxio = 2;
2013
2014    if (sc->amr_nextslot > sc->amr_maxio)
2015	return;
2016    acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
2017    if (acc != NULL) {
2018	nextslot = sc->amr_nextslot;
2019	mtx_lock(&sc->amr_list_lock);
2020	TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2021	mtx_unlock(&sc->amr_list_lock);
2022	for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2023	    ac = &acc->acc_command[i];
2024	    ac->ac_sc = sc;
2025	    ac->ac_slot = nextslot;
2026
2027	    /*
2028	     * The SG table for each slot is a fixed size and is assumed to
2029	     * hold 64-bit s/g objects when the driver is configured to do
2030	     * 64-bit DMA.  32-bit DMA commands still use the same table, but
2031	     * cast down to 32-bit objects.
2032	     */
2033	    if (AMR_IS_SG64(sc)) {
2034		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2035		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2036	        ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2037	    } else {
2038		ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2039		    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2040	        ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2041	    }
2042
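	    /* each slot also gets a fixed entry in the bus-addressable CCB array */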
2043	    ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
2044	    ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
2045		(ac->ac_slot * sizeof(union amr_ccb));
2046
2047	    if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
2048		break;
2049	    if (AMR_IS_SG64(sc) &&
2050		(bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
2051		break;
2052	    amr_releasecmd(ac);
2053	    if (++nextslot > sc->amr_maxio)
2054		break;
2055	}
2056	sc->amr_nextslot = nextslot;
2057    }
2058}
2059
2060/********************************************************************************
2061 * Free a command cluster
2062 */
2063static void
2064amr_freecmd_cluster(struct amr_command_cluster *acc)
2065{
2066    struct amr_softc	*sc = acc->acc_command[0].ac_sc;
2067    int			i;
2068
2069    for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2070	if (acc->acc_command[i].ac_sc == NULL)
2071	    break;
2072	bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2073	if (AMR_IS_SG64(sc))
2074		bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2075    }
2076    free(acc, M_AMR);
2077}
2078
2079/********************************************************************************
2080 ********************************************************************************
2081                                                         Interface-specific Shims
2082 ********************************************************************************
2083 ********************************************************************************/
2084
2085/********************************************************************************
2086 * Tell the controller that the mailbox contains a valid command
2087 */
2088static int
2089amr_quartz_submit_command(struct amr_command *ac)
2090{
2091    struct amr_softc	*sc = ac->ac_sc;
2092    static struct timeval lastfail;
2093    static int		curfail;
2094    int			i = 0;
2095
2096    mtx_lock(&sc->amr_hw_lock);
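    /* give the adapter a brief window to clear a transient mailbox-busy condition */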
2097    while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2098        DELAY(1);
2099	/* This is a no-op read that flushes pending mailbox updates */
2100	AMR_QGET_ODB(sc);
2101    }
2102    if (sc->amr_mailbox->mb_busy) {
2103	mtx_unlock(&sc->amr_hw_lock);
2104	if (ac->ac_retries++ > 1000) {
2105	    if (ppsratecheck(&lastfail, &curfail, 1))
2106		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2107			      "Controller is likely dead\n", ac);
2108	    ac->ac_retries = 0;
2109	}
2110	return (EBUSY);
2111    }
2112
2113    /*
2114     * Save the slot number so that we can locate this command when complete.
2115     * Note that ident = 0 seems to be special, so we don't use it.
2116     */
2117    ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
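    /* copy the 14-byte command portion; busy/poll/ack are set explicitly below */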
2118    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2119    sc->amr_mailbox->mb_busy = 1;
2120    sc->amr_mailbox->mb_poll = 0;
2121    sc->amr_mailbox->mb_ack  = 0;
2122    sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2123    sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2124
2125    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2126    mtx_unlock(&sc->amr_hw_lock);
2127    return(0);
2128}
2129
2130static int
2131amr_std_submit_command(struct amr_command *ac)
2132{
2133    struct amr_softc	*sc = ac->ac_sc;
2134    static struct timeval lastfail;
2135    static int		curfail;
2136
2137    mtx_lock(&sc->amr_hw_lock);
2138    if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2139	mtx_unlock(&sc->amr_hw_lock);
2140	if (ac->ac_retries++ > 1000) {
2141	    if (ppsratecheck(&lastfail, &curfail, 1))
2142		device_printf(sc->amr_dev, "Too many retries on command %p.  "
2143			      "Controller is likely dead\n", ac);
2144	    ac->ac_retries = 0;
2145	}
2146	return (EBUSY);
2147    }
2148
2149    /*
2150     * Save the slot number so that we can locate this command when complete.
2151     * Note that ident = 0 seems to be special, so we don't use it.
2152     */
2153    ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2154    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2155    sc->amr_mailbox->mb_busy = 1;
2156    sc->amr_mailbox->mb_poll = 0;
2157    sc->amr_mailbox->mb_ack  = 0;
2158
2159    AMR_SPOST_COMMAND(sc);
2160    mtx_unlock(&sc->amr_hw_lock);
2161    return(0);
2162}
2163
2164/********************************************************************************
2165 * Claim any work that the controller has completed; acknowledge completion,
2166 * save details of the completion in (mbsave)
2167 */
2168static int
2169amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2170{
2171    int		worked, i;
2172    u_int32_t	outd;
2173    u_int8_t	nstatus;
2174    u_int8_t	completed[46];
2175
2176    debug_called(3);
2177
2178    worked = 0;
2179
2180    /* work waiting for us? */
2181    if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2182
2183	/* acknowledge interrupt */
2184	AMR_QPUT_ODB(sc, AMR_QODB_READY);
2185
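	/* 0xff is the 'not yet written' sentinel; wait for the firmware to post the count */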
2186	while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2187	    DELAY(1);
2188	sc->amr_mailbox->mb_nstatus = 0xff;
2189
2190	/* wait until fw wrote out all completions */
2191	for (i = 0; i < nstatus; i++) {
2192	    while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2193		DELAY(1);
2194	    sc->amr_mailbox->mb_completed[i] = 0xff;
2195	}
2196
2197	/* Save information for later processing */
2198	mbsave->mb_nstatus = nstatus;
2199	mbsave->mb_status = sc->amr_mailbox->mb_status;
2200	sc->amr_mailbox->mb_status = 0xff;
2201
2202	for (i = 0; i < nstatus; i++)
2203	    mbsave->mb_completed[i] = completed[i];
2204
2205	/* acknowledge that we have the commands */
2206	AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2207
2208#if 0
2209#ifndef AMR_QUARTZ_GOFASTER
2210	/*
2211	 * This waits for the controller to notice that we've taken the
2212	 * command from it.  It's very inefficient, and we shouldn't do it,
2213	 * but if we remove this code, we stop completing commands under
2214	 * load.
2215	 *
2216	 * Peter J says we shouldn't do this.  The documentation says we
2217	 * should.  Who is right?
2218	 */
2219	while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2220	    ;				/* XXX aiee! what if it dies? */
2221#endif
2222#endif
2223
2224	worked = 1;			/* got some work */
2225    }
2226
2227    return(worked);
2228}
2229
2230static int
2231amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2232{
2233    int		worked;
2234    u_int8_t	istat;
2235
2236    debug_called(3);
2237
2238    worked = 0;
2239
2240    /* check for valid interrupt status */
2241    istat = AMR_SGET_ISTAT(sc);
2242    if ((istat & AMR_SINTR_VALID) != 0) {
2243	AMR_SPUT_ISTAT(sc, istat);	/* ack interrupt status */
2244
2245	/* save mailbox, which contains a list of completed commands */
2246	bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2247
2248	AMR_SACK_INTERRUPT(sc);		/* acknowledge we have the mailbox */
2249	worked = 1;
2250    }
2251
2252    return(worked);
2253}
2254
2255/********************************************************************************
2256 * Notify the controller of the mailbox location.
2257 */
2258static void
2259amr_std_attach_mailbox(struct amr_softc *sc)
2260{
2261
2262    /* program the mailbox physical address */
2263    AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys         & 0xff);
2264    AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >>  8) & 0xff);
2265    AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2266    AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2267    AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2268
2269    /* clear any outstanding interrupt, then enable interrupts */
2270    AMR_SACK_INTERRUPT(sc);
2271    AMR_SENABLE_INTR(sc);
2272}
2273
2274#ifdef AMR_BOARD_INIT
2275/********************************************************************************
2276 * Initialise the controller
2277 */
2278static int
2279amr_quartz_init(struct amr_softc *sc)
2280{
2281    int		status, ostatus;
2282
2283    device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2284
2285    AMR_QRESET(sc);
2286
2287    ostatus = 0xff;
2288    while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2289	if (status != ostatus) {
2290	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2291	    ostatus = status;
2292	}
2293	switch (status) {
2294	case AMR_QINIT_NOMEM:
2295	    return(ENOMEM);
2296
2297	case AMR_QINIT_SCAN:
2298	    /* XXX we could print channel/target here */
2299	    break;
2300	}
2301    }
2302    return(0);
2303}
2304
2305static int
2306amr_std_init(struct amr_softc *sc)
2307{
2308    int		status, ostatus;
2309
2310    device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2311
2312    AMR_SRESET(sc);
2313
2314    ostatus = 0xff;
2315    while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2316	if (status != ostatus) {
2317	    device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2318	    ostatus = status;
2319	}
2320	switch (status) {
2321	case AMR_SINIT_NOMEM:
2322	    return(ENOMEM);
2323
2324	case AMR_SINIT_INPROG:
2325	    /* XXX we could print channel/target here? */
2326	    break;
2327	}
2328    }
2329    return(0);
2330}
2331#endif
2332
2333/********************************************************************************
2334 ********************************************************************************
2335                                                                        Debugging
2336 ********************************************************************************
2337 ********************************************************************************/
2338
2339/********************************************************************************
2340 * Identify the controller and print some information about it.
2341 */
2342static void
2343amr_describe_controller(struct amr_softc *sc)
2344{
2345    struct amr_prodinfo	*ap;
2346    struct amr_enquiry	*ae;
2347    char		*prod;
2348    int			status;
2349
2350    /*
2351     * Try to get 40LD product info, which tells us what the card is labelled as.
2352     */
2353    if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2354	device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2355		      ap->ap_product, ap->ap_firmware, ap->ap_bios,
2356		      ap->ap_memsize);
2357
2358	free(ap, M_AMR);
2359	return;
2360    }
2361
2362    /*
2363     * Try 8LD extended ENQUIRY to get the controller signature and use the lookup table.
2364     */
2365    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2366	prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2367
2368    } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2369
2370	/*
2371	 * Try to work it out based on the PCI signatures.
2372	 */
2373	switch (pci_get_device(sc->amr_dev)) {
2374	case 0x9010:
2375	    prod = "Series 428";
2376	    break;
2377	case 0x9060:
2378	    prod = "Series 434";
2379	    break;
2380	default:
2381	    prod = "unknown controller";
2382	    break;
2383	}
2384    } else {
2385	device_printf(sc->amr_dev, "<unsupported controller>\n");
2386	return;
2387    }
2388
2389    /*
2390     * HP NetRaid controllers have a special encoding of the firmware and
2391     * BIOS versions.  The AMI firmware appears to store them as printable
2392     * strings, whereas the HP firmware encodes them with a leading uppercase
2393     * character followed by two binary numbers.
2394     */
2395
2396    if (ae->ae_adapter.aa_firmware[2] >= 'A' &&
2397       ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2398       ae->ae_adapter.aa_firmware[1] <  ' ' &&
2399       ae->ae_adapter.aa_firmware[0] <  ' ' &&
2400       ae->ae_adapter.aa_bios[2] >= 'A'     &&
2401       ae->ae_adapter.aa_bios[2] <= 'Z'     &&
2402       ae->ae_adapter.aa_bios[1] <  ' '     &&
2403       ae->ae_adapter.aa_bios[0] <  ' ') {
2404
2405	/* this looks like we have an HP NetRaid version of the MegaRaid */
2406
2407	if (ae->ae_signature == AMR_SIG_438) {
2408	    /* the AMI 438 is a NetRaid 3si in HP-land */
2409	    prod = "HP NetRaid 3si";
2410	}
2411
2412	device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2413		      prod, ae->ae_adapter.aa_firmware[2],
2414		      ae->ae_adapter.aa_firmware[1],
2415		      ae->ae_adapter.aa_firmware[0],
2416		      ae->ae_adapter.aa_bios[2],
2417		      ae->ae_adapter.aa_bios[1],
2418		      ae->ae_adapter.aa_bios[0],
2419		      ae->ae_adapter.aa_memorysize);
2420    } else {
2421	device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2422		      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2423		      ae->ae_adapter.aa_memorysize);
2424    }
2425    free(ae, M_AMR);
2426}
2427
2428int
2429amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2430{
2431    struct amr_command	*ac;
2432    int			error = EIO;
2433
2434    debug_called(1);
2435
2436    sc->amr_state |= AMR_STATE_INTEN;
2437
2438    /* get ourselves a command buffer */
2439    if ((ac = amr_alloccmd(sc)) == NULL)
2440	goto out;
2441    /* set command flags */
2442    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2443
2444    /* point the command at our data */
2445    ac->ac_data = data;
2446    ac->ac_length = blks * AMR_BLKSIZE;
2447
2448    /* build the command proper */
2449    ac->ac_mailbox.mb_command 	= AMR_CMD_LWRITE;
2450    ac->ac_mailbox.mb_blkcount	= blks;
2451    ac->ac_mailbox.mb_lba	= lba;
2452    ac->ac_mailbox.mb_drive	= unit;
2453
2454    /* can't assume that interrupts are going to work here, so play it safe */
2455    if (sc->amr_poll_command(ac))
2456	goto out;
2457    error = ac->ac_status;
2458
2459 out:
2460    if (ac != NULL)
2461	amr_releasecmd(ac);
2462
2463    sc->amr_state &= ~AMR_STATE_INTEN;
2464    return (error);
2465}
2466
2467
2468
2469#ifdef AMR_DEBUG
2470/********************************************************************************
2471 * Print the command (ac) in human-readable format
2472 */
2473#if 0
2474static void
2475amr_printcommand(struct amr_command *ac)
2476{
2477    struct amr_softc	*sc = ac->ac_sc;
2478    struct amr_sgentry	*sg;
2479    int			i;
2480
2481    device_printf(sc->amr_dev, "cmd %x  ident %d  drive %d\n",
2482		  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2483    device_printf(sc->amr_dev, "blkcount %d  lba %d\n",
2484		  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2485    device_printf(sc->amr_dev, "virtaddr %p  length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2486    device_printf(sc->amr_dev, "sg physaddr %08x  nsg %d\n",
2487		  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2488    device_printf(sc->amr_dev, "ccb %p  bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2489
2490    /* get base address of s/g table */
2491    sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2492    for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2493	device_printf(sc->amr_dev, "  %x/%d\n", sg->sg_addr, sg->sg_count);
2494}
2495#endif
2496#endif
2497