1/*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
43 * herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD: head/sys/dev/amr/amr.c 148499 2005-07-29 01:53:45Z ps $");
60
61/*
62 * Driver for the AMI MegaRaid family of controllers.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/kernel.h>
69
70#include <dev/amr/amr_compat.h>
71#include <sys/bus.h>
72#include <sys/conf.h>
73#include <sys/stat.h>
74
75#include <machine/bus.h>
76#include <machine/resource.h>
77#include <sys/rman.h>
78
79#include <dev/pci/pcireg.h>
80#include <dev/pci/pcivar.h>
81
82#include <dev/amr/amrio.h>
83#include <dev/amr/amrreg.h>
84#include <dev/amr/amrvar.h>
85#define AMR_DEFINE_TABLES
86#include <dev/amr/amr_tables.h>
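/*
 * Note: defining AMR_DEFINE_TABLES above makes amr_tables.h emit the table
 * definitions into this file rather than only declaring them.
 */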
87
88static d_open_t amr_open;
89static d_close_t amr_close;
90static d_ioctl_t amr_ioctl;
91
92static struct cdevsw amr_cdevsw = {
93 .d_version = D_VERSION,
94 .d_flags = D_NEEDGIANT,
95 .d_open = amr_open,
96 .d_close = amr_close,
97 .d_ioctl = amr_ioctl,
98 .d_name = "amr",
99};
100
101/*
102 * Initialisation, bus interface.
103 */
104static void amr_startup(void *arg);
105
106/*
107 * Command wrappers
108 */
109static int amr_query_controller(struct amr_softc *sc);
110static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
111 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual);
112static void amr_completeio(struct amr_command *ac);
113static int amr_support_ext_cdb(struct amr_softc *sc);
114
115/*
116 * Command buffer allocation.
117 */
118static void amr_alloccmd_cluster(struct amr_softc *sc);
119static void amr_freecmd_cluster(struct amr_command_cluster *acc);
120
121/*
122 * Command processing.
123 */
124static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
125static int amr_wait_command(struct amr_command *ac) __unused;
126static int amr_getslot(struct amr_command *ac);
127static int amr_mapcmd(struct amr_command *ac);
128static void amr_unmapcmd(struct amr_command *ac);
129static int amr_start(struct amr_command *ac);
130static int amr_start1(struct amr_softc *sc, struct amr_command *ac);
131static void amr_complete(void *context, int pending);
132static void amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
133static void amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
134
135/*
136 * Status monitoring
137 */
138static void amr_periodic(void *data);
139
140/*
141 * Interface-specific shims
142 */
143static int amr_quartz_submit_command(struct amr_softc *sc);
144static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
145static int amr_quartz_poll_command(struct amr_command *ac);
146static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
147
148static int amr_std_submit_command(struct amr_softc *sc);
149static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
150static int amr_std_poll_command(struct amr_command *ac);
151static void amr_std_attach_mailbox(struct amr_softc *sc);
152
153#ifdef AMR_BOARD_INIT
154static int amr_quartz_init(struct amr_softc *sc);
155static int amr_std_init(struct amr_softc *sc);
156#endif
157
158/*
159 * Debugging
160 */
161static void amr_describe_controller(struct amr_softc *sc);
162#ifdef AMR_DEBUG
163#if 0
164static void amr_printcommand(struct amr_command *ac);
165#endif
166#endif
167
168/********************************************************************************
169 ********************************************************************************
170 Inline Glue
171 ********************************************************************************
172 ********************************************************************************/
173
174/********************************************************************************
175 ********************************************************************************
176 Public Interfaces
177 ********************************************************************************
178 ********************************************************************************/
179
180/********************************************************************************
181 * Initialise the controller and softc.
182 */
183int
184amr_attach(struct amr_softc *sc)
185{
186
187 debug_called(1);
188
189 /*
190 * Initialise per-controller queues.
191 */
192 TAILQ_INIT(&sc->amr_completed);
193 TAILQ_INIT(&sc->amr_freecmds);
194 TAILQ_INIT(&sc->amr_cmd_clusters);
195 TAILQ_INIT(&sc->amr_ready);
196 bioq_init(&sc->amr_bioq);
197
198 debug(2, "queue init done");
199
200 /*
201 * Configure for this controller type.
202 */
203 if (AMR_IS_QUARTZ(sc)) {
204 sc->amr_submit_command = amr_quartz_submit_command;
205 sc->amr_get_work = amr_quartz_get_work;
206 sc->amr_poll_command = amr_quartz_poll_command;
207 sc->amr_poll_command1 = amr_quartz_poll_command1;
208 } else {
209 sc->amr_submit_command = amr_std_submit_command;
210 sc->amr_get_work = amr_std_get_work;
211 sc->amr_poll_command = amr_std_poll_command;
 212	amr_std_attach_mailbox(sc);
213 }
214
215#ifdef AMR_BOARD_INIT
 216    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
217 return(ENXIO);
218#endif
219
220 /*
221 * Quiz controller for features and limits.
222 */
223 if (amr_query_controller(sc))
224 return(ENXIO);
225
226 debug(2, "controller query complete");
227
228 /*
229 * Attach our 'real' SCSI channels to CAM.
230 */
231 if (amr_cam_attach(sc))
232 return(ENXIO);
233 debug(2, "CAM attach done");
234
235 /*
236 * Create the control device.
237 */
238 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
239 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
240 sc->amr_dev_t->si_drv1 = sc;
241
242 /*
243 * Schedule ourselves to bring the controller up once interrupts are
244 * available.
245 */
246 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
247 sc->amr_ich.ich_func = amr_startup;
248 sc->amr_ich.ich_arg = sc;
249 if (config_intrhook_establish(&sc->amr_ich) != 0) {
250 device_printf(sc->amr_dev, "can't establish configuration hook\n");
251 return(ENOMEM);
252 }
253
254 /*
255 * Print a little information about the controller.
256 */
257 amr_describe_controller(sc);
258
259 debug(2, "attach complete");
260 return(0);
261}
262
263/********************************************************************************
264 * Locate disk resources and attach children to them.
265 */
266static void
267amr_startup(void *arg)
268{
269 struct amr_softc *sc = (struct amr_softc *)arg;
270 struct amr_logdrive *dr;
271 int i, error;
272
273 debug_called(1);
274
275 /* pull ourselves off the intrhook chain */
276 config_intrhook_disestablish(&sc->amr_ich);
277
278 /* get up-to-date drive information */
279 if (amr_query_controller(sc)) {
280 device_printf(sc->amr_dev, "can't scan controller for drives\n");
281 return;
282 }
283
284 /* iterate over available drives */
285 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
286 /* are we already attached to this drive? */
287 if (dr->al_disk == 0) {
288 /* generate geometry information */
289 if (dr->al_size > 0x200000) { /* extended translation? */
290 dr->al_heads = 255;
291 dr->al_sectors = 63;
292 } else {
293 dr->al_heads = 64;
294 dr->al_sectors = 32;
295 }
296 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
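	    /*
	     * Worked example (illustration only): a 4194304-sector (2GB)
	     * drive exceeds the 0x200000 threshold, so it is presented as
	     * 255 heads x 63 sectors, giving 4194304 / (255 * 63) = 261
	     * cylinders.
	     */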
297
298 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
299 if (dr->al_disk == 0)
300 device_printf(sc->amr_dev, "device_add_child failed\n");
301 device_set_ivars(dr->al_disk, dr);
302 }
303 }
304
305 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
306 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
307
308 /* mark controller back up */
309 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
310
311 /* interrupts will be enabled before we do anything more */
312 sc->amr_state |= AMR_STATE_INTEN;
313
314 /*
315 * Start the timeout routine.
316 */
317/* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
318
319 return;
320}
321
322/*******************************************************************************
323 * Free resources associated with a controller instance
324 */
325void
326amr_free(struct amr_softc *sc)
327{
328 struct amr_command_cluster *acc;
329
330 /* detach from CAM */
331 amr_cam_detach(sc);
332
333 /* cancel status timeout */
334 untimeout(amr_periodic, sc, sc->amr_timeout);
335
336 /* throw away any command buffers */
337 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
338 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
339 amr_freecmd_cluster(acc);
340 }
341
342 /* destroy control device */
 343    if (sc->amr_dev_t != NULL)
344 destroy_dev(sc->amr_dev_t);
345
346 if (mtx_initialized(&sc->amr_io_lock))
347 mtx_destroy(&sc->amr_io_lock);
348}
349
350/*******************************************************************************
351 * Receive a bio structure from a child device and queue it on a particular
352 * disk resource, then poke the disk resource to start as much work as it can.
353 */
354int
355amr_submit_bio(struct amr_softc *sc, struct bio *bio)
356{
357 debug_called(2);
358
359 mtx_lock(&sc->amr_io_lock);
360 amr_enqueue_bio(sc, bio);
361 amr_startio(sc);
362 mtx_unlock(&sc->amr_io_lock);
363 return(0);
364}
365
366/********************************************************************************
367 * Accept an open operation on the control device.
368 */
369static int
370amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
371{
372 int unit = minor(dev);
373 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
374
375 debug_called(1);
376
377 sc->amr_state |= AMR_STATE_OPEN;
378 return(0);
379}
380
381/********************************************************************************
382 * Accept the last close on the control device.
383 */
384static int
385amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
386{
387 int unit = minor(dev);
388 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
389
390 debug_called(1);
391
392 sc->amr_state &= ~AMR_STATE_OPEN;
393 return (0);
394}
395
396/********************************************************************************
397 * Handle controller-specific control operations.
398 */
399static int
400amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
401{
402 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
403 union {
404 void *_p;
405 struct amr_user_ioctl *au;
406#ifdef AMR_IO_COMMAND32
407 struct amr_user_ioctl32 *au32;
408#endif
409 int *result;
410 } arg;
411 struct amr_command *ac;
412 struct amr_mailbox_ioctl *mbi;
413 void *dp, *au_buffer;
414 unsigned long au_length;
415 unsigned char *au_cmd;
416 int *au_statusp, au_direction;
417 int error;
418 struct amr_passthrough *ap; /* 60 bytes */
419
420 debug_called(1);
421
422 arg._p = (void *)addr;
423
424 switch(cmd) {
425
426 case AMR_IO_VERSION:
427 debug(1, "AMR_IO_VERSION");
428 *arg.result = AMR_IO_VERSION_NUMBER;
429 return(0);
430
431#ifdef AMR_IO_COMMAND32
432 /*
 433     * Accept ioctls from 32-bit binaries on non-32-bit
 434     * platforms such as amd64.  LSI's MEGAMGR utility is
 435     * the only example known today... -mi
436 */
437 case AMR_IO_COMMAND32:
438 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
439 au_cmd = arg.au32->au_cmd;
440 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
441 au_length = arg.au32->au_length;
442 au_direction = arg.au32->au_direction;
443 au_statusp = &arg.au32->au_status;
444 break;
445#endif
446
447 case AMR_IO_COMMAND:
448 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
449 au_cmd = arg.au->au_cmd;
450 au_buffer = (void *)arg.au->au_buffer;
451 au_length = arg.au->au_length;
452 au_direction = arg.au->au_direction;
453 au_statusp = &arg.au->au_status;
454 break;
455
456 default:
457 debug(1, "unknown ioctl 0x%lx", cmd);
458 return(ENOIOCTL);
459 }
460
461 error = 0;
462 dp = NULL;
463 ac = NULL;
464 ap = NULL;
465
466 /* Logical Drive not supported by the driver */
467 if (au_cmd[0] == 0xa4 && au_cmd[1] == 0x1c)
468 return (ENOIOCTL);
469
470 /* handle inbound data buffer */
471 if (au_length != 0 && au_cmd[0] != 0x06) {
472 dp = malloc(au_length, M_DEVBUF, M_WAITOK|M_ZERO);
473
474 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
475 free(dp, M_DEVBUF);
476 return (error);
477 }
478 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
479 }
480
481 /* Allocate this now before the mutex gets held */
482 if (au_cmd[0] == AMR_CMD_PASS)
483 ap = malloc(sizeof(struct amr_passthrough), M_DEVBUF, M_WAITOK|M_ZERO);
484
485 mtx_lock(&sc->amr_io_lock);
486 if ((ac = amr_alloccmd(sc)) == NULL) {
487 error = ENOMEM;
488 goto out;
489 }
490
491 /* handle SCSI passthrough command */
492 if (au_cmd[0] == AMR_CMD_PASS) {
493 int len;
494
495 /* copy cdb */
496 len = au_cmd[2];
497 ap->ap_cdb_length = len;
498 bcopy(au_cmd + 3, ap->ap_cdb, len);
499
500 /* build passthrough */
501 ap->ap_timeout = au_cmd[len + 3] & 0x07;
502 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
503 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
504 ap->ap_logical_drive_no = au_cmd[len + 4];
505 ap->ap_channel = au_cmd[len + 5];
506 ap->ap_scsi_id = au_cmd[len + 6];
507 ap->ap_request_sense_length = 14;
508 ap->ap_data_transfer_length = au_length;
509 /* XXX what about the request-sense area? does the caller want it? */
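	/*
	 * For reference, the user command layout decoded above is:
	 * au_cmd[2] = CDB length, au_cmd[3..] = CDB, then a control byte
	 * (bits 0-2 timeout, bit 3 auto request sense, bit 7 logical drive),
	 * followed by the logical drive number, channel and SCSI id.
	 */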
510
511 /* build command */
512 ac->ac_data = ap;
513 ac->ac_length = sizeof(struct amr_passthrough);
514 ac->ac_flags |= AMR_CMD_DATAOUT;
515 ac->ac_ccb_data = dp;
516 ac->ac_ccb_length = au_length;
517 if (au_direction & AMR_IO_READ)
518 ac->ac_flags |= AMR_CMD_CCB_DATAIN;
519 if (au_direction & AMR_IO_WRITE)
520 ac->ac_flags |= AMR_CMD_CCB_DATAOUT;
521
522 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
523
524 } else {
525 /* direct command to controller */
526 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
527
528 /* copy pertinent mailbox items */
529 mbi->mb_command = au_cmd[0];
530 mbi->mb_channel = au_cmd[1];
531 mbi->mb_param = au_cmd[2];
532 mbi->mb_pad[0] = au_cmd[3];
533 mbi->mb_drive = au_cmd[4];
534
535 /* build the command */
536 ac->ac_data = dp;
537 ac->ac_length = au_length;
538 if (au_direction & AMR_IO_READ)
539 ac->ac_flags |= AMR_CMD_DATAIN;
540 if (au_direction & AMR_IO_WRITE)
541 ac->ac_flags |= AMR_CMD_DATAOUT;
542 }
543
544 /* run the command */
545 if ((error = amr_wait_command(ac)) != 0)
546 goto out;
547
548 /* copy out data and set status */
549 if (au_length != 0) {
550 mtx_unlock(&sc->amr_io_lock);
551 error = copyout(dp, au_buffer, au_length);
552 mtx_lock(&sc->amr_io_lock);
553 }
554 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
555 if (dp != NULL)
 556	debug(2, "%p", dp);
557 *au_statusp = ac->ac_status;
558
559out:
560 /*
561 * At this point, we know that there is a lock held and that these
562 * objects have been allocated.
563 */
564 if (ac != NULL)
565 amr_releasecmd(ac);
566 mtx_unlock(&sc->amr_io_lock);
567 if (dp != NULL)
568 free(dp, M_DEVBUF);
569 if (ap != NULL)
570 free(ap, M_DEVBUF);
571 return(error);
572}
573
574/********************************************************************************
575 ********************************************************************************
576 Status Monitoring
577 ********************************************************************************
578 ********************************************************************************/
579
580/********************************************************************************
581 * Perform a periodic check of the controller status
582 */
583static void
584amr_periodic(void *data)
585{
586 struct amr_softc *sc = (struct amr_softc *)data;
587
588 debug_called(2);
589
590 /* XXX perform periodic status checks here */
591
592 /* compensate for missed interrupts */
593 amr_done(sc);
594
595 /* reschedule */
596 sc->amr_timeout = timeout(amr_periodic, sc, hz);
597}
598
599/********************************************************************************
600 ********************************************************************************
601 Command Wrappers
602 ********************************************************************************
603 ********************************************************************************/
604
605/********************************************************************************
606 * Interrogate the controller for the operational parameters we require.
607 */
608static int
609amr_query_controller(struct amr_softc *sc)
610{
611 struct amr_enquiry3 *aex;
612 struct amr_prodinfo *ap;
613 struct amr_enquiry *ae;
614 int ldrv;
615
616 mtx_lock(&sc->amr_io_lock);
617
618 /*
 619     * If we haven't found the real limit yet, allow ourselves a couple of
 620     * commands so that we can probe.
621 */
622 if (sc->amr_maxio == 0)
623 sc->amr_maxio = 2;
624
625 /*
 626     * Check for extended (greater than 10-byte) CDB support.
627 */
628 sc->support_ext_cdb = amr_support_ext_cdb(sc);
629
630 if(sc->support_ext_cdb) {
631 debug(2,"supports extended CDBs.");
632 }
633
634 /*
635 * Try to issue an ENQUIRY3 command
636 */
637 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
638 AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {
639
640 /*
641 * Fetch current state of logical drives.
642 */
643 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
644 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
645 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
646 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
647 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
648 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
649 }
650 free(aex, M_DEVBUF);
651
652 /*
653 * Get product info for channel count.
654 */
655 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
656 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
657 mtx_unlock(&sc->amr_io_lock);
658 return(1);
659 }
660 sc->amr_maxdrives = 40;
661 sc->amr_maxchan = ap->ap_nschan;
662 sc->amr_maxio = ap->ap_maxio;
663 sc->amr_type |= AMR_TYPE_40LD;
664 free(ap, M_DEVBUF);
665
666 } else {
667
668 /* failed, try the 8LD ENQUIRY commands */
669 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0)) == NULL) {
670 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0)) == NULL) {
671 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
672 mtx_unlock(&sc->amr_io_lock);
673 return(1);
674 }
675 ae->ae_signature = 0;
676 }
677
678 /*
679 * Fetch current state of logical drives.
680 */
681 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
682 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
683 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
684 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
685 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
686 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
687 }
688
689 sc->amr_maxdrives = 8;
690 sc->amr_maxchan = ae->ae_adapter.aa_channels;
691 sc->amr_maxio = ae->ae_adapter.aa_maxio;
692 free(ae, M_DEVBUF);
693 }
694
695 /*
696 * Mark remaining drives as unused.
697 */
698 for (; ldrv < AMR_MAXLD; ldrv++)
699 sc->amr_drive[ldrv].al_size = 0xffffffff;
700
701 /*
702 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
703 * the controller's reported value, and lockups have been seen when we do.
704 */
705 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
706
707 mtx_unlock(&sc->amr_io_lock);
708 return(0);
709}
710
711/********************************************************************************
712 * Run a generic enquiry-style command.
713 */
714static void *
715amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual)
716{
717 struct amr_command *ac;
718 void *result;
719 u_int8_t *mbox;
720 int error;
721
722 debug_called(1);
723
724 error = 1;
725 result = NULL;
726
727 /* get ourselves a command buffer */
728 if ((ac = amr_alloccmd(sc)) == NULL)
729 goto out;
730 /* allocate the response structure */
731 if ((result = malloc(bufsize, M_DEVBUF, M_ZERO|M_NOWAIT)) == NULL)
732 goto out;
733 /* set command flags */
734
735 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
736
737 /* point the command at our data */
738 ac->ac_data = result;
739 ac->ac_length = bufsize;
740
741 /* build the command proper */
742 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
743 mbox[0] = cmd;
744 mbox[2] = cmdsub;
745 mbox[3] = cmdqual;
746
747 /* can't assume that interrupts are going to work here, so play it safe */
748 if (sc->amr_poll_command(ac))
749 goto out;
750 error = ac->ac_status;
751
752 out:
753 if (ac != NULL)
754 amr_releasecmd(ac);
755 if ((error != 0) && (result != NULL)) {
756 free(result, M_DEVBUF);
757 result = NULL;
758 }
759 return(result);
760}
761
762/********************************************************************************
763 * Flush the controller's internal cache, return status.
764 */
765int
766amr_flush(struct amr_softc *sc)
767{
768 struct amr_command *ac;
769 int error;
770
771 /* get ourselves a command buffer */
772 error = 1;
773 mtx_lock(&sc->amr_io_lock);
774 if ((ac = amr_alloccmd(sc)) == NULL)
775 goto out;
776 /* set command flags */
777 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
778
779 /* build the command proper */
780 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
781
782 /* we have to poll, as the system may be going down or otherwise damaged */
783 if (sc->amr_poll_command(ac))
784 goto out;
785 error = ac->ac_status;
786
787 out:
788 if (ac != NULL)
789 amr_releasecmd(ac);
790 mtx_unlock(&sc->amr_io_lock);
791 return(error);
792}
793
794/********************************************************************************
 795 * Detect extended CDB (greater than 10-byte CDB) support.
 796 * Returns 1 if the controller supports extended CDBs,
 797 * 0 if it does not.
798 */
799static int
800amr_support_ext_cdb(struct amr_softc *sc)
801{
802 struct amr_command *ac;
803 u_int8_t *mbox;
804 int error;
805
806 /* get ourselves a command buffer */
807 error = 0;
808 if ((ac = amr_alloccmd(sc)) == NULL)
809 goto out;
810 /* set command flags */
811 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
812
813 /* build the command proper */
814 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
815 mbox[0] = 0xA4;
816 mbox[2] = 0x16;
817
818
819 /* we have to poll, as the system may be going down or otherwise damaged */
820 if (sc->amr_poll_command(ac))
821 goto out;
822 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
823 error = 1;
824 }
825
826out:
827 if (ac != NULL)
828 amr_releasecmd(ac);
829 return(error);
830}
831
832/********************************************************************************
833 * Try to find I/O work for the controller from one or more of the work queues.
834 *
835 * We make the assumption that if the controller is not ready to take a command
836 * at some given time, it will generate an interrupt at some later time when
837 * it is.
838 */
839void
840amr_startio(struct amr_softc *sc)
841{
842 struct amr_command *ac;
843
844 /* spin until something prevents us from doing any work */
845 for (;;) {
846
 847	/* Don't bother to queue commands if no bounce buffers are available. */
848 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
849 break;
850
851 /* try to get a ready command */
852 ac = amr_dequeue_ready(sc);
853
854 /* if that failed, build a command from a bio */
855 if (ac == NULL)
856 (void)amr_bio_command(sc, &ac);
857
858 /* if that failed, build a command from a ccb */
859 if (ac == NULL)
860 (void)amr_cam_command(sc, &ac);
861
862 /* if we don't have anything to do, give up */
863 if (ac == NULL)
864 break;
865
866 /* try to give the command to the controller; if this fails save it for later and give up */
867 if (amr_start(ac)) {
868 debug(2, "controller busy, command deferred");
869 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
870 break;
871 }
872 }
873}
874
875/********************************************************************************
876 * Handle completion of an I/O command.
877 */
878static void
879amr_completeio(struct amr_command *ac)
880{
881 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
882
883 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
884 ac->ac_bio->bio_error = EIO;
885 ac->ac_bio->bio_flags |= BIO_ERROR;
886
887 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
888/* amr_printcommand(ac);*/
889 }
890 amrd_intr(ac->ac_bio);
891 amr_releasecmd(ac);
892}
893
894/********************************************************************************
895 ********************************************************************************
896 Command Processing
897 ********************************************************************************
898 ********************************************************************************/
899
900/********************************************************************************
901 * Convert a bio off the top of the bio queue into a command.
902 */
903static int
904amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
905{
906 struct amr_command *ac;
907 struct amrd_softc *amrd;
908 struct bio *bio;
909 int error;
910 int blkcount;
911 int driveno;
912 int cmd;
913
914 ac = NULL;
915 error = 0;
916
917 /* get a command */
918 if ((ac = amr_alloccmd(sc)) == NULL)
919 return (ENOMEM);
920
921 /* get a bio to work on */
922 if ((bio = amr_dequeue_bio(sc)) == NULL) {
923 amr_releasecmd(ac);
924 return (0);
925 }
926
927 /* connect the bio to the command */
928 ac->ac_complete = amr_completeio;
929 ac->ac_bio = bio;
930 ac->ac_data = bio->bio_data;
931 ac->ac_length = bio->bio_bcount;
932 if (BIO_IS_READ(bio)) {
933 ac->ac_flags |= AMR_CMD_DATAIN;
934 cmd = AMR_CMD_LREAD;
935 } else {
936 ac->ac_flags |= AMR_CMD_DATAOUT;
937 cmd = AMR_CMD_LWRITE;
938 }
939 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
940 driveno = amrd->amrd_drive - sc->amr_drive;
941 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
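    /* round the transfer up to a whole number of AMR_BLKSIZE-byte blocks */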
942
943 ac->ac_mailbox.mb_command = cmd;
944 ac->ac_mailbox.mb_blkcount = blkcount;
945 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
946 ac->ac_mailbox.mb_drive = driveno;
947 /* we fill in the s/g related data when the command is mapped */
948
949 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size)
950 device_printf(sc->amr_dev, "I/O beyond end of unit (%lld,%d > %lu)\n",
951 (long long)bio->bio_pblkno, blkcount,
952 (u_long)sc->amr_drive[driveno].al_size);
953
954 *acp = ac;
955 return(error);
956}
957
958/********************************************************************************
959 * Take a command, submit it to the controller and sleep until it completes
960 * or fails. Interrupts must be enabled, returns nonzero on error.
961 */
962static int
963amr_wait_command(struct amr_command *ac)
964{
965 int error = 0;
966
967 debug_called(1);
968
969 ac->ac_complete = NULL;
970 ac->ac_flags |= AMR_CMD_SLEEP;
971 if ((error = amr_start(ac)) != 0)
972 return(error);
973
974 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
975 error = msleep(ac, &ac->ac_sc->amr_io_lock, PRIBIO, "amrwcmd", 0);
976 }
977 return(error);
978}
979
980/********************************************************************************
981 * Take a command, submit it to the controller and busy-wait for it to return.
982 * Returns nonzero on error. Can be safely called with interrupts enabled.
983 */
984static int
985amr_std_poll_command(struct amr_command *ac)
986{
987 struct amr_softc *sc = ac->ac_sc;
988 int error, count;
989
990 debug_called(2);
991
992 ac->ac_complete = NULL;
993 if ((error = amr_start(ac)) != 0)
994 return(error);
995
996 count = 0;
997 do {
998 /*
999 * Poll for completion, although the interrupt handler may beat us to it.
1000 * Note that the timeout here is somewhat arbitrary.
1001 */
1002 amr_done(sc);
1003 DELAY(1000);
1004 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1005 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1006 error = 0;
1007 } else {
1008 /* XXX the slot is now marked permanently busy */
1009 error = EIO;
1010 device_printf(sc->amr_dev, "polled command timeout\n");
1011 }
1012 return(error);
1013}
1014
1015static void
1016amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1017{
1018 struct amr_command *ac = arg;
1019 struct amr_softc *sc = ac->ac_sc;
1020
1021 amr_setup_dmamap(arg, segs, nsegs, err);
1022 if (ac->ac_flags & AMR_CMD_DATAIN) {
1023 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1024 BUS_DMASYNC_PREREAD);
1025 }
1026 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1027 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1028 BUS_DMASYNC_PREWRITE);
1029 }
1030 sc->amr_poll_command1(sc, ac);
1031}
1032
1033/********************************************************************************
1034 * Take a command, submit it to the controller and busy-wait for it to return.
1035 * Returns nonzero on error. Can be safely called with interrupts enabled.
1036 */
1037static int
1038amr_quartz_poll_command(struct amr_command *ac)
1039{
1040 struct amr_softc *sc = ac->ac_sc;
1041 int s, error;
1042
1043 debug_called(2);
1044
1045 s = splbio();
1046 error = 0;
1047
1048 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1049 if (ac->ac_data != 0) {
1050 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_dmamap, ac->ac_data,
1051 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1052 error = 1;
1053 }
1054 } else {
1055 error = amr_quartz_poll_command1(sc, ac);
1056 }
1057
1058 splx(s);
1059 return (error);
1060}
1061
1062static int
1063amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1064{
1065 int count, error;
1066
1067 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1068 count=0;
1069 while (sc->amr_busyslots) {
1070 msleep(sc, &sc->amr_io_lock, PRIBIO | PCATCH, "amrpoll", hz);
1071 if(count++>10) {
1072 break;
1073 }
1074 }
1075
1076 if(sc->amr_busyslots) {
1077 device_printf(sc->amr_dev, "adapter is busy\n");
1078 if (ac->ac_data != NULL)
1079 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1080 ac->ac_status=0;
1081 return(1);
1082 }
1083 }
1084
1085 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1086
1087 /* clear the poll/ack fields in the mailbox */
1088 sc->amr_mailbox->mb_ident = 0xFE;
1089 sc->amr_mailbox->mb_nstatus = 0xFF;
1090 sc->amr_mailbox->mb_status = 0xFF;
1091 sc->amr_mailbox->mb_poll = 0;
1092 sc->amr_mailbox->mb_ack = 0;
1093 sc->amr_mailbox->mb_busy = 1;
1094
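    /*
     * Polled handshake, summarised from the code below: ring the submit
     * doorbell, spin until the firmware fills in mb_nstatus/mb_status,
     * wait for mb_poll to become 0x77, acknowledge with mb_ack = 0x77 and
     * the ACK doorbell, then wait for the doorbell bit to clear.
     */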
1095 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1096
1097 while(sc->amr_mailbox->mb_nstatus == 0xFF);
1098 while(sc->amr_mailbox->mb_status == 0xFF);
1099 ac->ac_status=sc->amr_mailbox->mb_status;
1100 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1101 while(sc->amr_mailbox->mb_poll != 0x77);
1102 sc->amr_mailbox->mb_poll = 0;
1103 sc->amr_mailbox->mb_ack = 0x77;
1104
1105 /* acknowledge that we have the commands */
1106 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1107 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK);
1108
1109 /* unmap the command's data buffer */
1110 if (ac->ac_flags & AMR_CMD_DATAIN) {
1111 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1112 BUS_DMASYNC_POSTREAD);
1113 }
1114 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1115 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1116 BUS_DMASYNC_POSTWRITE);
1117 }
1118 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1119
1120 return(error);
1121}
1122
1123/********************************************************************************
1124 * Get a free command slot for a command if it doesn't already have one.
1125 *
1126 * May be safely called multiple times for a given command.
1127 */
1128static int
1129amr_getslot(struct amr_command *ac)
1130{
1131 struct amr_softc *sc = ac->ac_sc;
1132 int slot;
1133
1134 debug_called(3);
1135
1136 slot = ac->ac_slot;
1137 if (sc->amr_busycmd[slot] != NULL)
1138 panic("amr: slot %d busy?\n", slot);
1139
1140 sc->amr_busycmd[slot] = ac;
1141 sc->amr_busyslots++;
1142
1143 return (0);
1144}
1145
1146/********************************************************************************
1147 * Map/unmap (ac)'s data in the controller's addressable space as required.
1148 *
1149 * These functions may be safely called multiple times on a given command.
1150 */
1151static void
1152amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1153{
1154 struct amr_command *ac = (struct amr_command *)arg;
1155 struct amr_softc *sc = ac->ac_sc;
1156 struct amr_sgentry *sg;
1157 int i;
1158 u_int8_t *sgc;
1159
1160 debug_called(3);
1161
1162 /* get base address of s/g table */
1163 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
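    /* each command slot owns a fixed window of AMR_NSEG entries in the shared s/g table */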
1164
1165 /* save data physical address */
1166 ac->ac_dataphys = segs[0].ds_addr;
1167
1168 /* for AMR_CMD_CONFIG the s/g count goes elsewhere */
1169 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG) {
1170 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1171 } else {
1172 sgc = &ac->ac_mailbox.mb_nsgelem;
1173 }
1174
1175 /* decide whether we need to populate the s/g table */
1176 if (nsegments < 2) {
1177 *sgc = 0;
1178 ac->ac_mailbox.mb_nsgelem = 0;
1179 ac->ac_mailbox.mb_physaddr = ac->ac_dataphys;
1180 } else {
1181 ac->ac_mailbox.mb_nsgelem = nsegments;
1182 *sgc = nsegments;
1183 ac->ac_mailbox.mb_physaddr = sc->amr_sgbusaddr +
1184 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1185 for (i = 0; i < nsegments; i++, sg++) {
1186 sg->sg_addr = segs[i].ds_addr;
1187 sg->sg_count = segs[i].ds_len;
1188 }
1189 }
1190
1191}
1192
1193static void
1194amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1195{
1196 struct amr_command *ac = (struct amr_command *)arg;
1197 struct amr_softc *sc = ac->ac_sc;
1198 struct amr_sgentry *sg;
1199 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1200 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1201 int i;
1202
1203 /* get base address of s/g table */
1204 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1205
1206 /* decide whether we need to populate the s/g table */
1207 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1208 if (nsegments < 2) {
1209 aep->ap_no_sg_elements = 0;
1210 aep->ap_data_transfer_address = segs[0].ds_addr;
1211 } else {
1212 /* save s/g table information in passthrough */
1213 aep->ap_no_sg_elements = nsegments;
1214 aep->ap_data_transfer_address = sc->amr_sgbusaddr +
1215 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1216 /*
1217 * populate s/g table (overwrites previous call which mapped the
1218 * passthrough)
1219 */
1220 for (i = 0; i < nsegments; i++, sg++) {
1221 sg->sg_addr = segs[i].ds_addr;
1222 sg->sg_count = segs[i].ds_len;
1223 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1224 }
1225 }
1226 debug(3, "slot %d %d segments at 0x%x, passthrough at 0x%x\n",
1227 ac->ac_slot, aep->ap_no_sg_elements, aep->ap_data_transfer_address,
1228 ac->ac_dataphys);
1229 } else {
1230 if (nsegments < 2) {
1231 ap->ap_no_sg_elements = 0;
1232 ap->ap_data_transfer_address = segs[0].ds_addr;
1233 } else {
1234 /* save s/g table information in passthrough */
1235 ap->ap_no_sg_elements = nsegments;
1236 ap->ap_data_transfer_address = sc->amr_sgbusaddr +
1237 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1238 /*
1239 * populate s/g table (overwrites previous call which mapped the
1240 * passthrough)
1241 */
1242 for (i = 0; i < nsegments; i++, sg++) {
1243 sg->sg_addr = segs[i].ds_addr;
1244 sg->sg_count = segs[i].ds_len;
1245 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1246 }
1247 }
1248 debug(3, "slot %d %d segments at 0x%x, passthrough at 0x%x",
1249 ac->ac_slot, ap->ap_no_sg_elements, ap->ap_data_transfer_address,
1250 ac->ac_dataphys);
1251 }
1252 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1253 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1254 BUS_DMASYNC_PREREAD);
1255 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1256 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1257 BUS_DMASYNC_PREWRITE);
1258 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1259 panic("no direction for ccb?\n");
1260
1261 if (ac->ac_flags & AMR_CMD_DATAIN)
1262 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1263 if (ac->ac_flags & AMR_CMD_DATAOUT)
1264 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1265
1266 ac->ac_flags |= AMR_CMD_MAPPED;
1267
1268 amr_start1(sc, ac);
1269}
1270
1271static int
1272amr_mapcmd(struct amr_command *ac)
1273{
1274 struct amr_softc *sc = ac->ac_sc;
1275
1276 debug_called(3);
1277
1278 /* if the command involves data at all, and hasn't been mapped */
1279 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1280 if (ac->ac_ccb_data == NULL) {
1281 /* map the data buffers into bus space and build the s/g list */
1282 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_dmamap, ac->ac_data,
1283 ac->ac_length, amr_setup_data_dmamap, ac, 0) == EINPROGRESS) {
1284 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1285 }
1286 } else {
1287 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_dmamap, ac->ac_data,
1288 ac->ac_length, amr_setup_dmamap, ac, BUS_DMA_NOWAIT) != 0){
1289 return (ENOMEM);
1290 }
1291 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1292 ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccbmap, ac,
1293 0) == EINPROGRESS) {
1294 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1295 }
1296 }
1297 } else if ((ac->ac_flags & AMR_CMD_MAPPED) == 0) {
1298 amr_start1(sc, ac);
1299 }
1300
1301 return (0);
1302}
1303
1304static void
1305amr_unmapcmd(struct amr_command *ac)
1306{
1307 struct amr_softc *sc = ac->ac_sc;
1308
1309 debug_called(3);
1310
1311 /* if the command involved data at all and was mapped */
1312 if (ac->ac_flags & AMR_CMD_MAPPED) {
1313
1314 if (ac->ac_data != NULL) {
1315 if (ac->ac_flags & AMR_CMD_DATAIN)
1316 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap,
1317 BUS_DMASYNC_POSTREAD);
1318 if (ac->ac_flags & AMR_CMD_DATAOUT)
1319 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap,
1320 BUS_DMASYNC_POSTWRITE);
1321 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1322 }
1323
1324 if (ac->ac_ccb_data != NULL) {
1325 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1326 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1327 BUS_DMASYNC_POSTREAD);
1328 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1329 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1330 BUS_DMASYNC_POSTWRITE);
1331 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap);
1332 }
1333 ac->ac_flags &= ~AMR_CMD_MAPPED;
1334 }
1335}
1336
1337static void
1338amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1339{
1340 struct amr_command *ac = arg;
1341 struct amr_softc *sc = ac->ac_sc;
1342
1343 amr_setup_dmamap(arg, segs, nsegs, err);
1344
1345 if (ac->ac_flags & AMR_CMD_DATAIN)
1346 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1347 if (ac->ac_flags & AMR_CMD_DATAOUT)
1348 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1349 ac->ac_flags |= AMR_CMD_MAPPED;
1350
1351 amr_start1(sc, ac);
1352}
1353
1354/********************************************************************************
1355 * Take a command and give it to the controller, returns 0 if successful, or
1356 * EBUSY if the command should be retried later.
1357 */
1358static int
1359amr_start(struct amr_command *ac)
1360{
1361 struct amr_softc *sc;
1362 int error = 0;
1363
1364 debug_called(3);
1365
1366 /* mark command as busy so that polling consumer can tell */
1367 sc = ac->ac_sc;
1368 ac->ac_flags |= AMR_CMD_BUSY;
1369
1370 /* get a command slot (freed in amr_done) */
1371 if (amr_getslot(ac)) {
1372 return(EBUSY);
1373 }
1374
1375 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1376 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1377 /*
1378	 * Memory resources are short, so free the slot and let this be tried
1379 * later.
1380 */
1381 sc->amr_busycmd[ac->ac_slot] = NULL;
1382 sc->amr_busyslots--;
1383 }
1384
1385 return (error);
1386}
1387
1388
1389static int
1390amr_start1(struct amr_softc *sc, struct amr_command *ac)
1391{
1392 int done, s, i;
1393
1394 /* mark the new mailbox we are going to copy in as busy */
1395 ac->ac_mailbox.mb_busy = 1;
1396
1397 /* clear the poll/ack fields in the mailbox */
1398 sc->amr_mailbox->mb_poll = 0;
1399 sc->amr_mailbox->mb_ack = 0;
1400
1401 /*
1402 * Save the slot number so that we can locate this command when complete.
1403 * Note that ident = 0 seems to be special, so we don't use it.
1404 */
1405 ac->ac_mailbox.mb_ident = ac->ac_slot + 1;
1406
1407 /*
1408 * Spin waiting for the mailbox, give up after ~1 second. We expect the
1409 * controller to be able to handle our I/O.
1410 *
1411 * XXX perhaps we should wait for less time, and count on the deferred command
1412 * handling to deal with retries?
1413 */
1414 debug(4, "wait for mailbox");
1415 for (i = 10000, done = 0; (i > 0) && !done; i--) {
1416 s = splbio();
1417
1418 /* is the mailbox free? */
1419 if (sc->amr_mailbox->mb_busy == 0) {
1420 debug(4, "got mailbox");
1421 sc->amr_mailbox64->mb64_segment = 0;
1422 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1423 done = 1;
1424
1425 /* not free, spin waiting */
1426 } else {
1427 debug(4, "busy flag %x\n", sc->amr_mailbox->mb_busy);
1428 /* this is somewhat ugly */
1429 DELAY(100);
1430 }
1431 splx(s); /* drop spl to allow completion interrupts */
1432 }
1433
1434 /*
1435 * Now give the command to the controller
1436 */
1437 if (done) {
1438 if (sc->amr_submit_command(sc)) {
1439 /* the controller wasn't ready to take the command, forget that we tried to post it */
1440 sc->amr_mailbox->mb_busy = 0;
1441 return(EBUSY);
1442 }
1443 debug(3, "posted command");
1444 return(0);
1445 }
1446
1447 /*
1448 * The controller wouldn't take the command. Return the command as busy
1449 * so that it is retried later.
1450 */
1451 return(EBUSY);
1452}
1453
1454/********************************************************************************
1455 * Extract one or more completed commands from the controller (sc)
1456 *
1457 * Returns nonzero if any commands on the work queue were marked as completed.
1458 */
1459
1460int
1461amr_done(struct amr_softc *sc)
1462{
1463 struct amr_command *ac;
1464 struct amr_mailbox mbox;
1465 int i, idx, result;
1466
1467 debug_called(3);
1468
1469 /* See if there's anything for us to do */
1470 result = 0;
1471
1472 /* loop collecting completed commands */
1473 for (;;) {
1474 /* poll for a completed command's identifier and status */
1475 if (sc->amr_get_work(sc, &mbox)) {
1476 result = 1;
1477
1478 /* iterate over completed commands in this result */
1479 for (i = 0; i < mbox.mb_nstatus; i++) {
1480 /* get pointer to busy command */
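		/* mb_completed[] carries mb_ident values, which amr_start1() set to slot + 1, hence the -1 */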
1481 idx = mbox.mb_completed[i] - 1;
1482 ac = sc->amr_busycmd[idx];
1483
1484 /* really a busy command? */
1485 if (ac != NULL) {
1486
1487 /* pull the command from the busy index */
1488 sc->amr_busycmd[idx] = NULL;
1489 sc->amr_busyslots--;
1490
1491 /* save status for later use */
1492 ac->ac_status = mbox.mb_status;
1493 amr_enqueue_completed(ac);
1494 debug(3, "completed command with status %x", mbox.mb_status);
1495 } else {
1496 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1497 }
1498 }
1499 } else {
1500 break; /* no work */
1501 }
1502 }
1503
1504 /* handle completion and timeouts */
1505 amr_complete(sc, 0);
1506
1507 return(result);
1508}
1509
1510/********************************************************************************
1511 * Do completion processing on done commands on (sc)
1512 */
1513
1514static void
1515amr_complete(void *context, int pending)
1516{
1517 struct amr_softc *sc = (struct amr_softc *)context;
1518 struct amr_command *ac;
1519
1520 debug_called(3);
1521
1522 /* pull completed commands off the queue */
1523 for (;;) {
1524 ac = amr_dequeue_completed(sc);
1525 if (ac == NULL)
1526 break;
1527
1528 /* unmap the command's data buffer */
1529 amr_unmapcmd(ac);
1530
1531 /* unbusy the command */
1532 ac->ac_flags &= ~AMR_CMD_BUSY;
1533
1534 /*
1535 * Is there a completion handler?
1536 */
1537 if (ac->ac_complete != NULL) {
1538 ac->ac_complete(ac);
1539
1540 /*
1541 * Is someone sleeping on this one?
1542 */
1543 } else if (ac->ac_flags & AMR_CMD_SLEEP) {
1544 wakeup(ac);
1545 }
1546
1547 if(!sc->amr_busyslots) {
1548 wakeup(sc);
1549 }
1550 }
1551
1552 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1553 amr_startio(sc);
1554}
1555
1556/********************************************************************************
1557 ********************************************************************************
1558 Command Buffer Management
1559 ********************************************************************************
1560 ********************************************************************************/
1561
1562/********************************************************************************
1563 * Get a new command buffer.
1564 *
1565 * This may return NULL in low-memory cases.
1566 *
1567 * If possible, we recycle a command buffer that's been used before.
1568 */
1569struct amr_command *
1570amr_alloccmd(struct amr_softc *sc)
1571{
1572 struct amr_command *ac;
1573
1574 debug_called(3);
1575
1576 ac = amr_dequeue_free(sc);
1577 if (ac == NULL) {
1578 amr_alloccmd_cluster(sc);
1579 ac = amr_dequeue_free(sc);
1580 }
1581 if (ac == NULL) {
1582 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1583 return(NULL);
1584 }
1585
1586 /* clear out significant fields */
1587 ac->ac_status = 0;
1588 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1589 ac->ac_flags = 0;
1590 ac->ac_bio = NULL;
1591 ac->ac_data = NULL;
1592 ac->ac_ccb_data = NULL;
1593 ac->ac_complete = NULL;
1594 return(ac);
1595}
1596
1597/********************************************************************************
1598 * Release a command buffer for recycling.
1599 */
1600void
1601amr_releasecmd(struct amr_command *ac)
1602{
1603 debug_called(3);
1604
1605 amr_enqueue_free(ac);
1606}
1607
1608/********************************************************************************
1609 * Allocate a new command cluster and initialise it.
1610 */
1611static void
1612amr_alloccmd_cluster(struct amr_softc *sc)
1613{
1614 struct amr_command_cluster *acc;
1615 struct amr_command *ac;
1616 int s, i, nextslot;
1617
1618 if (sc->amr_nextslot > sc->amr_maxio)
1619 return;
1620 acc = malloc(AMR_CMD_CLUSTERSIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
1621 if (acc != NULL) {
1622 s = splbio();
1623 nextslot = sc->amr_nextslot;
1624 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1625 splx(s);
1626 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1627 ac = &acc->acc_command[i];
1628 ac->ac_sc = sc;
1629 ac->ac_slot = nextslot;
1630 if (!bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) &&
1631 !bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap))
1632 amr_releasecmd(ac);
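	    /* the command goes onto the free list only if both DMA maps were created */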
1633 if (++nextslot > sc->amr_maxio)
1634 break;
1635 }
1636 sc->amr_nextslot = nextslot;
1637 }
1638}
1639
1640/********************************************************************************
1641 * Free a command cluster
1642 */
1643static void
1644amr_freecmd_cluster(struct amr_command_cluster *acc)
1645{
1646 struct amr_softc *sc = acc->acc_command[0].ac_sc;
1647 int i;
1648
1649 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++)
1650 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
1651 free(acc, M_DEVBUF);
1652}
1653
1654/********************************************************************************
1655 ********************************************************************************
1656 Interface-specific Shims
1657 ********************************************************************************
1658 ********************************************************************************/
1659
1660/********************************************************************************
1661 * Tell the controller that the mailbox contains a valid command
1662 */
1663static int
1664amr_quartz_submit_command(struct amr_softc *sc)
1665{
1666 debug_called(3);
1667
1668 if (AMR_QGET_IDB(sc) & AMR_QIDB_SUBMIT)
1669 return(EBUSY);
1670 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1671 return(0);
1672}
1673
1674static int
1675amr_std_submit_command(struct amr_softc *sc)
1676{
1677 debug_called(3);
1678
1679 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG)
1680 return(EBUSY);
1681 AMR_SPOST_COMMAND(sc);
1682 return(0);
1683}
1684
1685/********************************************************************************
1686 * Claim any work that the controller has completed; acknowledge completion,
1687 * save details of the completion in (mbsave)
1688 */
1689static int
1690amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
1691{
1692 int s, worked;
1693 u_int32_t outd;
1694 u_int8_t nstatus;
1695
1696 debug_called(3);
1697
1698 worked = 0;
1699 s = splbio();
1700
1701 /* work waiting for us? */
1702 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
1703
1704 /* acknowledge interrupt */
1705 AMR_QPUT_ODB(sc, AMR_QODB_READY);
1706
1707 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
1708 ;
1709 sc->amr_mailbox->mb_nstatus = 0xff;
1710
1711 /* save mailbox, which contains a list of completed commands */
1712 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
1713 mbsave->mb_nstatus = nstatus;
1714
1715 /* acknowledge that we have the commands */
1716 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
1717
1718#ifndef AMR_QUARTZ_GOFASTER
1719 /*
1720 * This waits for the controller to notice that we've taken the
1721 * command from it. It's very inefficient, and we shouldn't do it,
1722 * but if we remove this code, we stop completing commands under
1723 * load.
1724 *
1725 * Peter J says we shouldn't do this. The documentation says we
1726 * should. Who is right?
1727 */
1728 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1729 ; /* XXX aiee! what if it dies? */
1730#endif
1731
1732 worked = 1; /* got some work */
1733 }
1734
1735 splx(s);
1736 return(worked);
1737}
1738
1739static int
1740amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
1741{
1742 int s, worked;
1743 u_int8_t istat;
1744
1745 debug_called(3);
1746
1747 worked = 0;
1748 s = splbio();
1749
1750 /* check for valid interrupt status */
1751 istat = AMR_SGET_ISTAT(sc);
1752 if ((istat & AMR_SINTR_VALID) != 0) {
1753 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
1754
1755 /* save mailbox, which contains a list of completed commands */
1756 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
1757
1758 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
1759 worked = 1;
1760 }
1761
1762 splx(s);
1763 return(worked);
1764}
1765
1766/********************************************************************************
1767 * Notify the controller of the mailbox location.
1768 */
1769static void
1770amr_std_attach_mailbox(struct amr_softc *sc)
1771{
1772
1773 /* program the mailbox physical address */
1774 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
1775 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
1776 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
1777 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
1778 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
1779
1780 /* clear any outstanding interrupt and enable interrupts proper */
1781 AMR_SACK_INTERRUPT(sc);
1782 AMR_SENABLE_INTR(sc);
1783}
1784
1785#ifdef AMR_BOARD_INIT
1786/********************************************************************************
1787 * Initialise the controller
1788 */
1789static int
1790amr_quartz_init(struct amr_softc *sc)
1791{
1792 int status, ostatus;
1793
1794 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
1795
1796 AMR_QRESET(sc);
1797
1798 ostatus = 0xff;
1799 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
1800 if (status != ostatus) {
1801 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
1802 ostatus = status;
1803 }
1804 switch (status) {
1805 case AMR_QINIT_NOMEM:
1806 return(ENOMEM);
1807
1808 case AMR_QINIT_SCAN:
1809 /* XXX we could print channel/target here */
1810 break;
1811 }
1812 }
1813 return(0);
1814}
1815
1816static int
1817amr_std_init(struct amr_softc *sc)
1818{
1819 int status, ostatus;
1820
1821 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
1822
1823 AMR_SRESET(sc);
1824
1825 ostatus = 0xff;
1826 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
1827 if (status != ostatus) {
1828 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
1829 ostatus = status;
1830 }
1831 switch (status) {
1832 case AMR_SINIT_NOMEM:
1833 return(ENOMEM);
1834
1835 case AMR_SINIT_INPROG:
1836 /* XXX we could print channel/target here? */
1837 break;
1838 }
1839 }
1840 return(0);
1841}
1842#endif
1843
1844/********************************************************************************
1845 ********************************************************************************
1846 Debugging
1847 ********************************************************************************
1848 ********************************************************************************/
1849
1850/********************************************************************************
1851 * Identify the controller and print some information about it.
1852 */
1853static void
1854amr_describe_controller(struct amr_softc *sc)
1855{
1856 struct amr_prodinfo *ap;
1857 struct amr_enquiry *ae;
1858 char *prod;
1859
1860 mtx_lock(&sc->amr_io_lock);
1861 /*
1862 * Try to get 40LD product info, which tells us what the card is labelled as.
1863 */
1864 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) != NULL) {
1865 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
1866 ap->ap_product, ap->ap_firmware, ap->ap_bios,
1867 ap->ap_memsize);
1868
1869 free(ap, M_DEVBUF);
1870 mtx_unlock(&sc->amr_io_lock);
1871 return;
1872 }
1873
1874 /*
1875 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
1876 */
1877 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0)) != NULL) {
1878 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
1879
1880 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0)) != NULL) {
1881
1882 /*
1883 * Try to work it out based on the PCI signatures.
1884 */
1885 switch (pci_get_device(sc->amr_dev)) {
1886 case 0x9010:
1887 prod = "Series 428";
1888 break;
1889 case 0x9060:
1890 prod = "Series 434";
1891 break;
1892 default:
1893 prod = "unknown controller";
1894 break;
1895 }
1896 } else {
1897 device_printf(sc->amr_dev, "<unsupported controller>\n");
1898 mtx_unlock(&sc->amr_io_lock);
1899 return;
1900 }
1901
1902 /*
1903 * HP NetRaid controllers have a special encoding of the firmware and
1904 * BIOS versions. The AMI version seems to have it as strings whereas
1905 * the HP version does it with a leading uppercase character and two
1906 * binary numbers.
1907 */
1908
1909 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
1910 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
1911 ae->ae_adapter.aa_firmware[1] < ' ' &&
1912 ae->ae_adapter.aa_firmware[0] < ' ' &&
1913 ae->ae_adapter.aa_bios[2] >= 'A' &&
1914 ae->ae_adapter.aa_bios[2] <= 'Z' &&
1915 ae->ae_adapter.aa_bios[1] < ' ' &&
1916 ae->ae_adapter.aa_bios[0] < ' ') {
1917
1918 /* this looks like we have an HP NetRaid version of the MegaRaid */
1919
1920 if(ae->ae_signature == AMR_SIG_438) {
1921 /* the AMI 438 is a NetRaid 3si in HP-land */
1922 prod = "HP NetRaid 3si";
1923 }
1924
1925 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
1926 prod, ae->ae_adapter.aa_firmware[2],
1927 ae->ae_adapter.aa_firmware[1],
1928 ae->ae_adapter.aa_firmware[0],
1929 ae->ae_adapter.aa_bios[2],
1930 ae->ae_adapter.aa_bios[1],
1931 ae->ae_adapter.aa_bios[0],
1932 ae->ae_adapter.aa_memorysize);
1933 } else {
1934 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
1935 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
1936 ae->ae_adapter.aa_memorysize);
1937 }
1938 free(ae, M_DEVBUF);
1939 mtx_unlock(&sc->amr_io_lock);
1940}
1941
1942int
1943amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
1944{
1945 struct amr_command *ac;
1946 int error = EIO;
1947
1948 debug_called(1);
1949
1950 sc->amr_state |= AMR_STATE_INTEN;
1951
1952 /* get ourselves a command buffer */
1953 if ((ac = amr_alloccmd(sc)) == NULL)
1954 goto out;
1955 /* set command flags */
1956 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1957
1958 /* point the command at our data */
1959 ac->ac_data = data;
1960 ac->ac_length = blks * AMR_BLKSIZE;
1961
1962 /* build the command proper */
1963 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
1964 ac->ac_mailbox.mb_blkcount = blks;
1965 ac->ac_mailbox.mb_lba = lba;
1966 ac->ac_mailbox.mb_drive = unit;
1967
1968 /* can't assume that interrupts are going to work here, so play it safe */
1969 if (sc->amr_poll_command(ac))
1970 goto out;
1971 error = ac->ac_status;
1972
1973 out:
1974 if (ac != NULL)
1975 amr_releasecmd(ac);
1976
1977 sc->amr_state &= ~AMR_STATE_INTEN;
1978 return (error);
1979}
1980
1981
1982
1983#ifdef AMR_DEBUG
1984/********************************************************************************
1985 * Print the command (ac) in human-readable format
1986 */
1987#if 0
1988static void
1989amr_printcommand(struct amr_command *ac)
1990{
1991 struct amr_softc *sc = ac->ac_sc;
1992 struct amr_sgentry *sg;
1993 int i;
1994
1995 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
1996 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
1997 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
1998 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
1999 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2000 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2001 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2002 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2003
2004 /* get base address of s/g table */
2005 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2006 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2007 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
2008}
2009#endif
2010#endif