1/*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
43 * herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD: head/sys/dev/amr/amr.c 174544 2007-12-12 05:55:03Z scottl $");
60
61/*
 62 * Driver for the AMI MegaRAID family of controllers.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/kernel.h>
69#include <sys/proc.h>
70#include <sys/sysctl.h>
71
72#include <sys/bio.h>
73#include <sys/bus.h>
74#include <sys/conf.h>
75#include <sys/stat.h>
76
77#include <machine/bus.h>
78#include <machine/cpu.h>
79#include <machine/resource.h>
80#include <sys/rman.h>
81
82#include <dev/pci/pcireg.h>
83#include <dev/pci/pcivar.h>
84
85#include <dev/amr/amrio.h>
86#include <dev/amr/amrreg.h>
87#include <dev/amr/amrvar.h>
88#define AMR_DEFINE_TABLES
89#include <dev/amr/amr_tables.h>
90
91/*
 92 * CAM pass-through support is enabled by default; define AMR_ENABLE_CAM as 0 to disable it.
93 */
94#ifndef AMR_ENABLE_CAM
95#define AMR_ENABLE_CAM 1
96#endif
97
98SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
99
100static d_open_t amr_open;
101static d_close_t amr_close;
102static d_ioctl_t amr_ioctl;
103
104static struct cdevsw amr_cdevsw = {
105 .d_version = D_VERSION,
106 .d_flags = D_NEEDGIANT,
107 .d_open = amr_open,
108 .d_close = amr_close,
109 .d_ioctl = amr_ioctl,
110 .d_name = "amr",
111};
112
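/*
 * Number of adapters exported to the Linux ioctl emulation; incremented as
 * each controller attaches and reported by the 0x82/'m' query below.
 */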
113int linux_no_adapter = 0;
114/*
115 * Initialisation, bus interface.
116 */
117static void amr_startup(void *arg);
118
119/*
120 * Command wrappers
121 */
122static int amr_query_controller(struct amr_softc *sc);
123static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
124 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
125static void amr_completeio(struct amr_command *ac);
126static int amr_support_ext_cdb(struct amr_softc *sc);
127
128/*
129 * Command buffer allocation.
130 */
131static void amr_alloccmd_cluster(struct amr_softc *sc);
132static void amr_freecmd_cluster(struct amr_command_cluster *acc);
133
134/*
135 * Command processing.
136 */
137static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
138static int amr_wait_command(struct amr_command *ac) __unused;
139static int amr_mapcmd(struct amr_command *ac);
140static void amr_unmapcmd(struct amr_command *ac);
141static int amr_start(struct amr_command *ac);
142static void amr_complete(void *context, int pending);
143static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
146
147/*
148 * Status monitoring
149 */
150static void amr_periodic(void *data);
151
152/*
153 * Interface-specific shims
154 */
155static int amr_quartz_submit_command(struct amr_command *ac);
156static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
157static int amr_quartz_poll_command(struct amr_command *ac);
158static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
159
160static int amr_std_submit_command(struct amr_command *ac);
161static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
162static int amr_std_poll_command(struct amr_command *ac);
163static void amr_std_attach_mailbox(struct amr_softc *sc);
164
165#ifdef AMR_BOARD_INIT
166static int amr_quartz_init(struct amr_softc *sc);
167static int amr_std_init(struct amr_softc *sc);
168#endif
169
170/*
171 * Debugging
172 */
173static void amr_describe_controller(struct amr_softc *sc);
174#ifdef AMR_DEBUG
175#if 0
176static void amr_printcommand(struct amr_command *ac);
177#endif
178#endif
179
180static void amr_init_sysctl(struct amr_softc *sc);
181static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
182 int32_t flag, d_thread_t *td);
183
184MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
185
186/********************************************************************************
187 ********************************************************************************
188 Inline Glue
189 ********************************************************************************
190 ********************************************************************************/
191
192/********************************************************************************
193 ********************************************************************************
194 Public Interfaces
195 ********************************************************************************
196 ********************************************************************************/
197
198/********************************************************************************
199 * Initialise the controller and softc.
200 */
201int
202amr_attach(struct amr_softc *sc)
203{
204
205 debug_called(1);
206
207 /*
208 * Initialise per-controller queues.
209 */
210 TAILQ_INIT(&sc->amr_completed);
211 TAILQ_INIT(&sc->amr_freecmds);
212 TAILQ_INIT(&sc->amr_cmd_clusters);
213 TAILQ_INIT(&sc->amr_ready);
214 bioq_init(&sc->amr_bioq);
215
216 debug(2, "queue init done");
217
218 /*
219 * Configure for this controller type.
220 */
221 if (AMR_IS_QUARTZ(sc)) {
222 sc->amr_submit_command = amr_quartz_submit_command;
223 sc->amr_get_work = amr_quartz_get_work;
224 sc->amr_poll_command = amr_quartz_poll_command;
225 sc->amr_poll_command1 = amr_quartz_poll_command1;
226 } else {
227 sc->amr_submit_command = amr_std_submit_command;
228 sc->amr_get_work = amr_std_get_work;
229 sc->amr_poll_command = amr_std_poll_command;
 230	amr_std_attach_mailbox(sc);
231 }
232
233#ifdef AMR_BOARD_INIT
 234    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
235 return(ENXIO);
236#endif
237
238 /*
239 * Quiz controller for features and limits.
240 */
241 if (amr_query_controller(sc))
242 return(ENXIO);
243
244 debug(2, "controller query complete");
245
246 /*
247 * Setup sysctls.
248 */
249 amr_init_sysctl(sc);
250
251#if AMR_ENABLE_CAM != 0
252 /*
253 * Attach our 'real' SCSI channels to CAM.
254 */
255 if (amr_cam_attach(sc))
256 return(ENXIO);
257 debug(2, "CAM attach done");
258#endif
259
260 /*
261 * Create the control device.
262 */
263 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
264 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
265 sc->amr_dev_t->si_drv1 = sc;
266 linux_no_adapter++;
267 if (device_get_unit(sc->amr_dev) == 0)
268 make_dev_alias(sc->amr_dev_t, "megadev0");
269
270 /*
271 * Schedule ourselves to bring the controller up once interrupts are
272 * available.
273 */
274 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
275 sc->amr_ich.ich_func = amr_startup;
276 sc->amr_ich.ich_arg = sc;
277 if (config_intrhook_establish(&sc->amr_ich) != 0) {
278 device_printf(sc->amr_dev, "can't establish configuration hook\n");
279 return(ENOMEM);
280 }
281
282 /*
283 * Print a little information about the controller.
284 */
285 amr_describe_controller(sc);
286
287 debug(2, "attach complete");
288 return(0);
289}
290
291/********************************************************************************
292 * Locate disk resources and attach children to them.
293 */
294static void
295amr_startup(void *arg)
296{
297 struct amr_softc *sc = (struct amr_softc *)arg;
298 struct amr_logdrive *dr;
299 int i, error;
300
301 debug_called(1);
302
303 /* pull ourselves off the intrhook chain */
304 if (sc->amr_ich.ich_func)
305 config_intrhook_disestablish(&sc->amr_ich);
306 sc->amr_ich.ich_func = NULL;
307
308 /* get up-to-date drive information */
309 if (amr_query_controller(sc)) {
310 device_printf(sc->amr_dev, "can't scan controller for drives\n");
311 return;
312 }
313
314 /* iterate over available drives */
315 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
316 /* are we already attached to this drive? */
317 if (dr->al_disk == 0) {
318 /* generate geometry information */
319 if (dr->al_size > 0x200000) { /* extended translation? */
320 dr->al_heads = 255;
321 dr->al_sectors = 63;
322 } else {
323 dr->al_heads = 64;
324 dr->al_sectors = 32;
325 }
326 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
327
328 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
329 if (dr->al_disk == 0)
330 device_printf(sc->amr_dev, "device_add_child failed\n");
331 device_set_ivars(dr->al_disk, dr);
332 }
333 }
334
335 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
336 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
337
338 /* mark controller back up */
339 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
340
341 /* interrupts will be enabled before we do anything more */
342 sc->amr_state |= AMR_STATE_INTEN;
343
344 /*
345 * Start the timeout routine.
346 */
347/* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
348
349 return;
350}
351
352static void
353amr_init_sysctl(struct amr_softc *sc)
354{
355
356 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
357 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
358 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
359 "");
360}
361
362
363/*******************************************************************************
364 * Free resources associated with a controller instance
365 */
366void
367amr_free(struct amr_softc *sc)
368{
369 struct amr_command_cluster *acc;
370
371#if AMR_ENABLE_CAM != 0
372 /* detach from CAM */
373 amr_cam_detach(sc);
374#endif
375
376 /* cancel status timeout */
377 untimeout(amr_periodic, sc, sc->amr_timeout);
378
379 /* throw away any command buffers */
380 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
381 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
382 amr_freecmd_cluster(acc);
383 }
384
385 /* destroy control device */
386 if( sc->amr_dev_t != (struct cdev *)NULL)
387 destroy_dev(sc->amr_dev_t);
388
389 if (mtx_initialized(&sc->amr_hw_lock))
390 mtx_destroy(&sc->amr_hw_lock);
391
392 if (mtx_initialized(&sc->amr_list_lock))
393 mtx_destroy(&sc->amr_list_lock);
394}
395
396/*******************************************************************************
397 * Receive a bio structure from a child device and queue it on a particular
398 * disk resource, then poke the disk resource to start as much work as it can.
399 */
400int
401amr_submit_bio(struct amr_softc *sc, struct bio *bio)
402{
403 debug_called(2);
404
405 mtx_lock(&sc->amr_list_lock);
406 amr_enqueue_bio(sc, bio);
407 amr_startio(sc);
408 mtx_unlock(&sc->amr_list_lock);
409 return(0);
410}
411
412/********************************************************************************
413 * Accept an open operation on the control device.
414 */
415static int
416amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
417{
418 int unit = minor(dev);
419 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
420
421 debug_called(1);
422
423 sc->amr_state |= AMR_STATE_OPEN;
424 return(0);
425}
426
427#ifdef LSI
428static int
429amr_del_ld(struct amr_softc *sc, int drv_no, int status)
430{
431
432 debug_called(1);
433
434 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
435 sc->amr_state &= ~AMR_STATE_LD_DELETE;
436 sc->amr_state |= AMR_STATE_REMAP_LD;
437 debug(1, "State Set");
438
439 if (!status) {
 440	debug(1, "disk being destroyed %d", drv_no);
441 if (--amr_disks_registered == 0)
442 cdevsw_remove(&amrddisk_cdevsw);
 443	debug(1, "disk destroyed successfully");
444 }
445 return 0;
446}
447
448static int
449amr_prepare_ld_delete(struct amr_softc *sc)
450{
451
452 debug_called(1);
453 if (sc->ld_del_supported == 0)
454 return(ENOIOCTL);
455
456 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
457 sc->amr_state |= AMR_STATE_LD_DELETE;
458
 459    /* Wait up to a minute for all outstanding commands to be flushed. */
460 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
461 if ( sc->amr_busyslots )
462 return(ENOIOCTL);
463
464 return 0;
465}
466#endif
467
468/********************************************************************************
469 * Accept the last close on the control device.
470 */
471static int
472amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
473{
474 int unit = minor(dev);
475 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
476
477 debug_called(1);
478
479 sc->amr_state &= ~AMR_STATE_OPEN;
480 return (0);
481}
482
483/********************************************************************************
 484 * Rescan the controller's logical drives after a configuration change.
485 */
486static void
487amr_rescan_drives(struct cdev *dev)
488{
489 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
490 int i, error = 0;
491
492 sc->amr_state |= AMR_STATE_REMAP_LD;
493 while (sc->amr_busyslots) {
494 device_printf(sc->amr_dev, "idle controller\n");
495 amr_done(sc);
496 }
497
498 /* mark ourselves as in-shutdown */
499 sc->amr_state |= AMR_STATE_SHUTDOWN;
500
501 /* flush controller */
502 device_printf(sc->amr_dev, "flushing cache...");
503 printf("%s\n", amr_flush(sc) ? "failed" : "done");
504
505 /* delete all our child devices */
506 for(i = 0 ; i < AMR_MAXLD; i++) {
507 if(sc->amr_drive[i].al_disk != 0) {
508 if((error = device_delete_child(sc->amr_dev,
509 sc->amr_drive[i].al_disk)) != 0)
510 goto shutdown_out;
511
512 sc->amr_drive[i].al_disk = 0;
513 }
514 }
515
516shutdown_out:
517 amr_startup(sc);
518}
519
520int
521amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
522 d_thread_t *td)
523{
524 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
525 struct amr_command *ac;
526 struct amr_mailbox *mb;
527 struct amr_linux_ioctl ali;
528 void *dp, *temp;
529 int error;
530 int adapter, len, ac_flags = 0;
531 int logical_drives_changed = 0;
532 u_int32_t linux_version = 0x02100000;
533 u_int8_t status;
534 struct amr_passthrough *ap; /* 60 bytes */
535
536 error = 0;
537 dp = NULL;
538 ac = NULL;
539 ap = NULL;
540
541 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
542 return (error);
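    /*
     * Opcode 0x82 carries driver-private queries from the Linux megaraid
     * management tools ('e' = emulated driver version, 'm' = adapter count);
     * opcodes 0x80/0x81 carry ordinary mailbox commands with optional data.
     */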
543 switch (ali.ui.fcs.opcode) {
544 case 0x82:
545 switch(ali.ui.fcs.subopcode) {
546 case 'e':
547 copyout(&linux_version, (void *)(uintptr_t)ali.data,
548 sizeof(linux_version));
549 error = 0;
550 break;
551
552 case 'm':
553 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
554 sizeof(linux_no_adapter));
555 td->td_retval[0] = linux_no_adapter;
556 error = 0;
557 break;
558
559 default:
560 printf("Unknown subopcode\n");
561 error = ENOIOCTL;
562 break;
563 }
564 break;
565
566 case 0x80:
567 case 0x81:
568 if (ali.ui.fcs.opcode == 0x80)
569 len = max(ali.outlen, ali.inlen);
570 else
571 len = ali.ui.fcs.length;
572
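    /*
     * The Linux interface appears to pass the adapter number XORed with
     * ('m' << 8); '<<' binds tighter than '^', so this recovers the unit.
     */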
573 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
574
575 mb = (void *)&ali.mbox[0];
576
577 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
578 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
579 if (sc->amr_allow_vol_config == 0) {
580 error = EPERM;
581 break;
582 }
583 logical_drives_changed = 1;
584 }
585
586 if (ali.mbox[0] == AMR_CMD_PASS) {
587 mtx_lock(&sc->amr_list_lock);
588 while ((ac = amr_alloccmd(sc)) == NULL)
589 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
590 mtx_unlock(&sc->amr_list_lock);
591 ap = &ac->ac_ccb->ccb_pthru;
592
593 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
594 sizeof(struct amr_passthrough));
595 if (error)
596 break;
597
598 if (ap->ap_data_transfer_length)
599 dp = malloc(ap->ap_data_transfer_length, M_AMR,
600 M_WAITOK | M_ZERO);
601
602 if (ali.inlen) {
603 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
604 dp, ap->ap_data_transfer_length);
605 if (error)
606 break;
607 }
608
609 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
610 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
611 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
612 ac->ac_flags = ac_flags;
613
614 ac->ac_data = dp;
615 ac->ac_length = ap->ap_data_transfer_length;
616 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
617
618 mtx_lock(&sc->amr_list_lock);
619 error = amr_wait_command(ac);
620 mtx_unlock(&sc->amr_list_lock);
621 if (error)
622 break;
623
624 status = ac->ac_status;
625 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
626 if (error)
627 break;
628
629 if (ali.outlen) {
630 error = copyout(dp, temp, ap->ap_data_transfer_length);
631 if (error)
632 break;
633 }
634 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
635 if (error)
636 break;
637
638 error = 0;
639 break;
640 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
641 printf("No AMR_CMD_PASS_64\n");
642 error = ENOIOCTL;
643 break;
644 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
645 printf("No AMR_CMD_EXTPASS\n");
646 error = ENOIOCTL;
647 break;
648 } else {
649 if (len)
650 dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
651
652 if (ali.inlen) {
653 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
654 if (error)
655 break;
656 }
657
658 mtx_lock(&sc->amr_list_lock);
659 while ((ac = amr_alloccmd(sc)) == NULL)
660 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
661
662 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
663 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
664 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
665
666 ac->ac_length = len;
667 ac->ac_data = dp;
668 ac->ac_flags = ac_flags;
669
670 error = amr_wait_command(ac);
671 mtx_unlock(&sc->amr_list_lock);
672 if (error)
673 break;
674
675 status = ac->ac_status;
676 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
677 if (ali.outlen) {
678 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
679 if (error)
680 break;
681 }
682
683 error = 0;
684 if (logical_drives_changed)
685 amr_rescan_drives(dev);
686 break;
687 }
688 break;
689
690 default:
691 debug(1, "unknown linux ioctl 0x%lx", cmd);
692 printf("unknown linux ioctl 0x%lx\n", cmd);
693 error = ENOIOCTL;
694 break;
695 }
696
697 /*
 698     * Clean up: release the command and free the data buffer if they were
 699     * allocated above; the list lock must be held around amr_releasecmd().
700 */
701 mtx_lock(&sc->amr_list_lock);
702 if (ac != NULL)
703 amr_releasecmd(ac);
704 mtx_unlock(&sc->amr_list_lock);
705 if (dp != NULL)
706 free(dp, M_AMR);
707 return(error);
708}
709
710static int
711amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
712{
713 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
714 union {
715 void *_p;
716 struct amr_user_ioctl *au;
717#ifdef AMR_IO_COMMAND32
718 struct amr_user_ioctl32 *au32;
719#endif
720 int *result;
721 } arg;
722 struct amr_command *ac;
723 struct amr_mailbox_ioctl *mbi;
724 void *dp, *au_buffer;
725 unsigned long au_length;
726 unsigned char *au_cmd;
727 int *au_statusp, au_direction;
728 int error;
729 struct amr_passthrough *ap; /* 60 bytes */
730 int logical_drives_changed = 0;
731
732 debug_called(1);
733
734 arg._p = (void *)addr;
735
736 error = 0;
737 dp = NULL;
738 ac = NULL;
739 ap = NULL;
740
741 switch(cmd) {
742
743 case AMR_IO_VERSION:
744 debug(1, "AMR_IO_VERSION");
745 *arg.result = AMR_IO_VERSION_NUMBER;
746 return(0);
747
748#ifdef AMR_IO_COMMAND32
749 /*
 750     * Accept ioctls from 32-bit binaries on 64-bit
 751     * platforms such as amd64.  LSI's MEGAMGR utility is
 752     * the only known example to date... -mi
753 */
754 case AMR_IO_COMMAND32:
755 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
756 au_cmd = arg.au32->au_cmd;
757 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
758 au_length = arg.au32->au_length;
759 au_direction = arg.au32->au_direction;
760 au_statusp = &arg.au32->au_status;
761 break;
762#endif
763
764 case AMR_IO_COMMAND:
765 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
766 au_cmd = arg.au->au_cmd;
767 au_buffer = (void *)arg.au->au_buffer;
768 au_length = arg.au->au_length;
769 au_direction = arg.au->au_direction;
770 au_statusp = &arg.au->au_status;
771 break;
772
773 case 0xc0046d00:
774 case 0xc06e6d00: /* Linux emulation */
775 {
776 devclass_t devclass;
777 struct amr_linux_ioctl ali;
778 int adapter, error;
779
780 devclass = devclass_find("amr");
781 if (devclass == NULL)
782 return (ENOENT);
783
784 error = copyin(addr, &ali, sizeof(ali));
785 if (error)
786 return (error);
787 if (ali.ui.fcs.opcode == 0x82)
788 adapter = 0;
789 else
790 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
791
792 sc = devclass_get_softc(devclass, adapter);
793 if (sc == NULL)
794 return (ENOENT);
795
796 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd,
797 addr, 0, td));
798 }
799 default:
800 debug(1, "unknown ioctl 0x%lx", cmd);
801 return(ENOIOCTL);
802 }
803
804 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
805 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
806 if (sc->amr_allow_vol_config == 0) {
807 error = EPERM;
808 goto out;
809 }
810 logical_drives_changed = 1;
811#ifdef LSI
812 if ((error = amr_prepare_ld_delete(sc)) != 0)
813 return (error);
814#endif
815 }
816
817 /* handle inbound data buffer */
818 if (au_length != 0 && au_cmd[0] != 0x06) {
819 if ((dp = malloc(au_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
820 error = ENOMEM;
821 goto out;
822 }
823 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
824 free(dp, M_AMR);
825 return (error);
826 }
827 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
828 }
829
830 /* Allocate this now before the mutex gets held */
831
832 mtx_lock(&sc->amr_list_lock);
833 while ((ac = amr_alloccmd(sc)) == NULL)
834 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
835
836 /* handle SCSI passthrough command */
837 if (au_cmd[0] == AMR_CMD_PASS) {
838 int len;
839
840 ap = &ac->ac_ccb->ccb_pthru;
841 bzero(ap, sizeof(struct amr_passthrough));
842
843 /* copy cdb */
844 len = au_cmd[2];
845 ap->ap_cdb_length = len;
846 bcopy(au_cmd + 3, ap->ap_cdb, len);
847
848 /* build passthrough */
849 ap->ap_timeout = au_cmd[len + 3] & 0x07;
850 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
851 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
852 ap->ap_logical_drive_no = au_cmd[len + 4];
853 ap->ap_channel = au_cmd[len + 5];
854 ap->ap_scsi_id = au_cmd[len + 6];
855 ap->ap_request_sense_length = 14;
856 ap->ap_data_transfer_length = au_length;
857 /* XXX what about the request-sense area? does the caller want it? */
858
859 /* build command */
860 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
861 ac->ac_flags = AMR_CMD_CCB;
862
863 } else {
864 /* direct command to controller */
865 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
866
867 /* copy pertinent mailbox items */
868 mbi->mb_command = au_cmd[0];
869 mbi->mb_channel = au_cmd[1];
870 mbi->mb_param = au_cmd[2];
871 mbi->mb_pad[0] = au_cmd[3];
872 mbi->mb_drive = au_cmd[4];
873 ac->ac_flags = 0;
874 }
875
876 /* build the command */
877 ac->ac_data = dp;
878 ac->ac_length = au_length;
879 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
880
881 /* run the command */
882 error = amr_wait_command(ac);
883 mtx_unlock(&sc->amr_list_lock);
884 if (error)
885 goto out;
886
887 /* copy out data and set status */
888 if (au_length != 0) {
889 error = copyout(dp, au_buffer, au_length);
890 }
891 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
892 if (dp != NULL)
893 debug(2, "%p status 0x%x", dp, ac->ac_status);
894 *au_statusp = ac->ac_status;
895
896out:
897 /*
 898     * Clean up: release the command and free the data buffer if they were
 899     * allocated above; the list lock must be held around amr_releasecmd().
900 */
901 mtx_lock(&sc->amr_list_lock);
902 if (ac != NULL)
903 amr_releasecmd(ac);
904 mtx_unlock(&sc->amr_list_lock);
905 if (dp != NULL)
906 free(dp, M_AMR);
907
908#ifndef LSI
909 if (logical_drives_changed)
910 amr_rescan_drives(dev);
911#endif
912
913 return(error);
914}
915
916/********************************************************************************
917 ********************************************************************************
918 Status Monitoring
919 ********************************************************************************
920 ********************************************************************************/
921
922/********************************************************************************
923 * Perform a periodic check of the controller status
924 */
925static void
926amr_periodic(void *data)
927{
928 struct amr_softc *sc = (struct amr_softc *)data;
929
930 debug_called(2);
931
932 /* XXX perform periodic status checks here */
933
934 /* compensate for missed interrupts */
935 amr_done(sc);
936
937 /* reschedule */
938 sc->amr_timeout = timeout(amr_periodic, sc, hz);
939}
940
941/********************************************************************************
942 ********************************************************************************
943 Command Wrappers
944 ********************************************************************************
945 ********************************************************************************/
946
947/********************************************************************************
948 * Interrogate the controller for the operational parameters we require.
949 */
950static int
951amr_query_controller(struct amr_softc *sc)
952{
953 struct amr_enquiry3 *aex;
954 struct amr_prodinfo *ap;
955 struct amr_enquiry *ae;
956 int ldrv;
957 int status;
958
959 /*
 960     * If we haven't found the real limit yet, allow ourselves a couple of
 961     * commands so that we can probe the controller.
962 */
963 if (sc->amr_maxio == 0)
964 sc->amr_maxio = 2;
965
966 /*
 967     * Check for support of CDBs longer than 10 bytes.
968 */
969 sc->support_ext_cdb = amr_support_ext_cdb(sc);
970
971 if(sc->support_ext_cdb) {
972 debug(2,"supports extended CDBs.");
973 }
974
975 /*
976 * Try to issue an ENQUIRY3 command
977 */
978 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
979 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
980
981 /*
982 * Fetch current state of logical drives.
983 */
984 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
985 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
986 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
987 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
988 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
989 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
990 }
991 free(aex, M_AMR);
992
993 /*
994 * Get product info for channel count.
995 */
996 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
997 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
998 return(1);
999 }
1000 sc->amr_maxdrives = 40;
1001 sc->amr_maxchan = ap->ap_nschan;
1002 sc->amr_maxio = ap->ap_maxio;
1003 sc->amr_type |= AMR_TYPE_40LD;
1004 free(ap, M_AMR);
1005
1006 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1007 if (ap != NULL)
1008 free(ap, M_AMR);
1009 if (!status) {
1010 sc->amr_ld_del_supported = 1;
1011 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1012 }
1013 } else {
1014
1015 /* failed, try the 8LD ENQUIRY commands */
1016 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1017 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1018 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1019 return(1);
1020 }
1021 ae->ae_signature = 0;
1022 }
1023
1024 /*
1025 * Fetch current state of logical drives.
1026 */
1027 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1028 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1029 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1030 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1031 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1032 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1033 }
1034
1035 sc->amr_maxdrives = 8;
1036 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1037 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1038 free(ae, M_AMR);
1039 }
1040
1041 /*
1042 * Mark remaining drives as unused.
1043 */
1044 for (; ldrv < AMR_MAXLD; ldrv++)
1045 sc->amr_drive[ldrv].al_size = 0xffffffff;
1046
1047 /*
1048 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1049 * the controller's reported value, and lockups have been seen when we do.
1050 */
1051 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1052
1053 return(0);
1054}
1055
1056/********************************************************************************
1057 * Run a generic enquiry-style command.
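 *
 * The returned buffer is allocated with malloc(M_AMR) and must be freed by
 * the caller.  A typical call, as used by amr_query_controller() above:
 *
 *	struct amr_prodinfo *ap;
 *	int status;
 *
 *	ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO,
 *	    0, &status);
 *	if (ap != NULL) {
 *		... use ap->ap_nschan, ap->ap_maxio ...
 *		free(ap, M_AMR);
 *	}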
1058 */
1059static void *
1060amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1061{
1062 struct amr_command *ac;
1063 void *result;
1064 u_int8_t *mbox;
1065 int error;
1066
1067 debug_called(1);
1068
1069 error = 1;
1070 result = NULL;
1071
1072 /* get ourselves a command buffer */
1073 mtx_lock(&sc->amr_list_lock);
1074 ac = amr_alloccmd(sc);
1075 mtx_unlock(&sc->amr_list_lock);
1076 if (ac == NULL)
1077 goto out;
1078 /* allocate the response structure */
1079 if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1080 goto out;
1081 /* set command flags */
1082
1083 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1084
1085 /* point the command at our data */
1086 ac->ac_data = result;
1087 ac->ac_length = bufsize;
1088
1089 /* build the command proper */
1090 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1091 mbox[0] = cmd;
1092 mbox[2] = cmdsub;
1093 mbox[3] = cmdqual;
1094 *status = 0;
1095
1096 /* can't assume that interrupts are going to work here, so play it safe */
1097 if (sc->amr_poll_command(ac))
1098 goto out;
1099 error = ac->ac_status;
1100 *status = ac->ac_status;
1101
1102 out:
1103 mtx_lock(&sc->amr_list_lock);
1104 if (ac != NULL)
1105 amr_releasecmd(ac);
1106 mtx_unlock(&sc->amr_list_lock);
1107 if ((error != 0) && (result != NULL)) {
1108 free(result, M_AMR);
1109 result = NULL;
1110 }
1111 return(result);
1112}
1113
1114/********************************************************************************
1115 * Flush the controller's internal cache, return status.
1116 */
1117int
1118amr_flush(struct amr_softc *sc)
1119{
1120 struct amr_command *ac;
1121 int error;
1122
1123 /* get ourselves a command buffer */
1124 error = 1;
1125 mtx_lock(&sc->amr_list_lock);
1126 ac = amr_alloccmd(sc);
1127 mtx_unlock(&sc->amr_list_lock);
1128 if (ac == NULL)
1129 goto out;
1130 /* set command flags */
1131 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1132
1133 /* build the command proper */
1134 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1135
1136 /* we have to poll, as the system may be going down or otherwise damaged */
1137 if (sc->amr_poll_command(ac))
1138 goto out;
1139 error = ac->ac_status;
1140
1141 out:
1142 mtx_lock(&sc->amr_list_lock);
1143 if (ac != NULL)
1144 amr_releasecmd(ac);
1145 mtx_unlock(&sc->amr_list_lock);
1146 return(error);
1147}
1148
1149/********************************************************************************
1150 * Detect extended CDB support (CDBs longer than 10 bytes).
1151 * Returns 1 if the controller supports it,
1152 * 0 if it does not.
1153 */
1154static int
1155amr_support_ext_cdb(struct amr_softc *sc)
1156{
1157 struct amr_command *ac;
1158 u_int8_t *mbox;
1159 int error;
1160
1161 /* get ourselves a command buffer */
1162 error = 0;
1163 mtx_lock(&sc->amr_list_lock);
1164 ac = amr_alloccmd(sc);
1165 mtx_unlock(&sc->amr_list_lock);
1166 if (ac == NULL)
1167 goto out;
1168 /* set command flags */
1169 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1170
1171 /* build the command proper */
1172 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1173 mbox[0] = 0xA4;
1174 mbox[2] = 0x16;
1175
1176
1177 /* we have to poll, as the system may be going down or otherwise damaged */
1178 if (sc->amr_poll_command(ac))
1179 goto out;
1180 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1181 error = 1;
1182 }
1183
1184out:
1185 mtx_lock(&sc->amr_list_lock);
1186 if (ac != NULL)
1187 amr_releasecmd(ac);
1188 mtx_unlock(&sc->amr_list_lock);
1189 return(error);
1190}
1191
1192/********************************************************************************
1193 * Try to find I/O work for the controller from one or more of the work queues.
1194 *
1195 * We make the assumption that if the controller is not ready to take a command
1196 * at some given time, it will generate an interrupt at some later time when
1197 * it is.
1198 */
1199void
1200amr_startio(struct amr_softc *sc)
1201{
1202 struct amr_command *ac;
1203
1204 /* spin until something prevents us from doing any work */
1205 for (;;) {
1206
1207	/* Don't bother to queue commands if no bounce buffers are available. */
1208 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1209 break;
1210
1211 /* try to get a ready command */
1212 ac = amr_dequeue_ready(sc);
1213
1214 /* if that failed, build a command from a bio */
1215 if (ac == NULL)
1216 (void)amr_bio_command(sc, &ac);
1217
1218#if AMR_ENABLE_CAM != 0
1219 /* if that failed, build a command from a ccb */
1220 if (ac == NULL)
1221 (void)amr_cam_command(sc, &ac);
1222#endif
1223
1224 /* if we don't have anything to do, give up */
1225 if (ac == NULL)
1226 break;
1227
1228 /* try to give the command to the controller; if this fails save it for later and give up */
1229 if (amr_start(ac)) {
1230 debug(2, "controller busy, command deferred");
1231 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1232 break;
1233 }
1234 }
1235}
1236
1237/********************************************************************************
1238 * Handle completion of an I/O command.
1239 */
1240static void
1241amr_completeio(struct amr_command *ac)
1242{
1243 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
1244 static struct timeval lastfail;
1245 static int curfail;
1246
1247 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1248 ac->ac_bio->bio_error = EIO;
1249 ac->ac_bio->bio_flags |= BIO_ERROR;
1250
1251 if (ppsratecheck(&lastfail, &curfail, 1))
1252 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1253/* amr_printcommand(ac);*/
1254 }
1255 amrd_intr(ac->ac_bio);
1256 mtx_lock(&ac->ac_sc->amr_list_lock);
1257 amr_releasecmd(ac);
1258 mtx_unlock(&ac->ac_sc->amr_list_lock);
1259}
1260
1261/********************************************************************************
1262 ********************************************************************************
1263 Command Processing
1264 ********************************************************************************
1265 ********************************************************************************/
1266
1267/********************************************************************************
1268 * Convert a bio off the top of the bio queue into a command.
1269 */
1270static int
1271amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1272{
1273 struct amr_command *ac;
1274 struct amrd_softc *amrd;
1275 struct bio *bio;
1276 int error;
1277 int blkcount;
1278 int driveno;
1279 int cmd;
1280
1281 *acp = NULL;
1282 error = 0;
1283
1284 /* get a command */
1285 if ((ac = amr_alloccmd(sc)) == NULL)
1286 return (ENOMEM);
1287
1288 /* get a bio to work on */
1289 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1290 amr_releasecmd(ac);
1291 return (0);
1292 }
1293
1294 /* connect the bio to the command */
1295 ac->ac_complete = amr_completeio;
1296 ac->ac_bio = bio;
1297 ac->ac_data = bio->bio_data;
1298 ac->ac_length = bio->bio_bcount;
1299 cmd = 0;
1300 switch (bio->bio_cmd) {
1301 case BIO_READ:
1302 ac->ac_flags |= AMR_CMD_DATAIN;
1303 if (AMR_IS_SG64(sc)) {
1304 cmd = AMR_CMD_LREAD64;
1305 ac->ac_flags |= AMR_CMD_SG64;
1306 } else
1307 cmd = AMR_CMD_LREAD;
1308 break;
1309 case BIO_WRITE:
1310 ac->ac_flags |= AMR_CMD_DATAOUT;
1311 if (AMR_IS_SG64(sc)) {
1312 cmd = AMR_CMD_LWRITE64;
1313 ac->ac_flags |= AMR_CMD_SG64;
1314 } else
1315 cmd = AMR_CMD_LWRITE;
1316 break;
1317 case BIO_FLUSH:
1318 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1319 cmd = AMR_CMD_FLUSH;
1320 break;
1321 }
1322 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1323 driveno = amrd->amrd_drive - sc->amr_drive;
1324 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1325
1326 ac->ac_mailbox.mb_command = cmd;
1327 if (bio->bio_cmd & (BIO_READ|BIO_WRITE)) {
1328 ac->ac_mailbox.mb_blkcount = blkcount;
1329 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1330 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1331 device_printf(sc->amr_dev,
1332 "I/O beyond end of unit (%lld,%d > %lu)\n",
1333 (long long)bio->bio_pblkno, blkcount,
1334 (u_long)sc->amr_drive[driveno].al_size);
1335 }
1336 }
1337 ac->ac_mailbox.mb_drive = driveno;
1338 if (sc->amr_state & AMR_STATE_REMAP_LD)
1339 ac->ac_mailbox.mb_drive |= 0x80;
1340
1341 /* we fill in the s/g related data when the command is mapped */
1342
1343 *acp = ac;
1344 return(error);
1345}
1346
1347/********************************************************************************
1348 * Take a command, submit it to the controller and sleep until it completes
1349 * or fails. Interrupts must be enabled, returns nonzero on error.
1350 */
1351static int
1352amr_wait_command(struct amr_command *ac)
1353{
1354 int error = 0;
1355 struct amr_softc *sc = ac->ac_sc;
1356
1357 debug_called(1);
1358
1359 ac->ac_complete = NULL;
1360 ac->ac_flags |= AMR_CMD_SLEEP;
1361 if ((error = amr_start(ac)) != 0) {
1362 return(error);
1363 }
1364
1365 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1366 error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1367 }
1368
1369 return(error);
1370}
1371
1372/********************************************************************************
1373 * Take a command, submit it to the controller and busy-wait for it to return.
1374 * Returns nonzero on error. Can be safely called with interrupts enabled.
1375 */
1376static int
1377amr_std_poll_command(struct amr_command *ac)
1378{
1379 struct amr_softc *sc = ac->ac_sc;
1380 int error, count;
1381
1382 debug_called(2);
1383
1384 ac->ac_complete = NULL;
1385 if ((error = amr_start(ac)) != 0)
1386 return(error);
1387
1388 count = 0;
1389 do {
1390 /*
1391 * Poll for completion, although the interrupt handler may beat us to it.
1392 * Note that the timeout here is somewhat arbitrary.
1393 */
1394 amr_done(sc);
1395 DELAY(1000);
1396 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1397 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1398 error = 0;
1399 } else {
1400 /* XXX the slot is now marked permanently busy */
1401 error = EIO;
1402 device_printf(sc->amr_dev, "polled command timeout\n");
1403 }
1404 return(error);
1405}
1406
1407static void
1408amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1409{
1410 struct amr_command *ac = arg;
1411 struct amr_softc *sc = ac->ac_sc;
1412 int mb_channel;
1413
1414 amr_setup_sg(arg, segs, nsegs, err);
1415
1416 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1417 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1418 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1419 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1420 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1421 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1422
1423 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1424 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1425 if (AC_IS_SG64(ac)) {
1426 ac->ac_sg64_hi = 0;
1427 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1428 }
1429
1430 sc->amr_poll_command1(sc, ac);
1431}
1432
1433/********************************************************************************
1434 * Take a command, submit it to the controller and busy-wait for it to return.
1435 * Returns nonzero on error. Can be safely called with interrupts enabled.
1436 */
1437static int
1438amr_quartz_poll_command(struct amr_command *ac)
1439{
1440 struct amr_softc *sc = ac->ac_sc;
1441 int error;
1442
1443 debug_called(2);
1444
1445 error = 0;
1446
1447 if (AC_IS_SG64(ac)) {
1448 ac->ac_tag = sc->amr_buffer64_dmat;
1449 ac->ac_datamap = ac->ac_dma64map;
1450 } else {
1451 ac->ac_tag = sc->amr_buffer_dmat;
1452 ac->ac_datamap = ac->ac_dmamap;
1453 }
1454
1455 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1456 if (ac->ac_data != 0) {
1457 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1458 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1459 error = 1;
1460 }
1461 } else {
1462 error = amr_quartz_poll_command1(sc, ac);
1463 }
1464
1465 return (error);
1466}
1467
1468static int
1469amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1470{
1471 int count, error;
1472
1473 mtx_lock(&sc->amr_hw_lock);
1474 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1475 count=0;
1476 while (sc->amr_busyslots) {
1477 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1478 if(count++>10) {
1479 break;
1480 }
1481 }
1482
1483 if(sc->amr_busyslots) {
1484 device_printf(sc->amr_dev, "adapter is busy\n");
1485 mtx_unlock(&sc->amr_hw_lock);
1486 if (ac->ac_data != NULL) {
1487 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1488 }
1489 ac->ac_status=0;
1490 return(1);
1491 }
1492 }
1493
1494 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1495
1496 /* clear the poll/ack fields in the mailbox */
1497 sc->amr_mailbox->mb_ident = 0xFE;
1498 sc->amr_mailbox->mb_nstatus = 0xFF;
1499 sc->amr_mailbox->mb_status = 0xFF;
1500 sc->amr_mailbox->mb_poll = 0;
1501 sc->amr_mailbox->mb_ack = 0;
1502 sc->amr_mailbox->mb_busy = 1;
1503
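    /*
     * Post the mailbox physical address to the IDB register to hand the
     * command to the firmware, then busy-wait on the mailbox status fields
     * initialised above.
     */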
1504 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1505
1506 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1507 DELAY(1);
1508 while(sc->amr_mailbox->mb_status == 0xFF)
1509 DELAY(1);
1510 ac->ac_status=sc->amr_mailbox->mb_status;
1511 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1512 while(sc->amr_mailbox->mb_poll != 0x77)
1513 DELAY(1);
1514 sc->amr_mailbox->mb_poll = 0;
1515 sc->amr_mailbox->mb_ack = 0x77;
1516
1517 /* acknowledge that we have the commands */
1518 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1519 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1520 DELAY(1);
1521 mtx_unlock(&sc->amr_hw_lock);
1522
1523 /* unmap the command's data buffer */
1524 if (ac->ac_flags & AMR_CMD_DATAIN) {
1525 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1526 }
1527 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1528 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1529 }
1530 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1531
1532 return(error);
1533}
1534
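/*
 * Release the busy-slot reservation made for (ac) in amr_start().
 */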
1535static __inline int
1536amr_freeslot(struct amr_command *ac)
1537{
1538 struct amr_softc *sc = ac->ac_sc;
1539 int slot;
1540
1541 debug_called(3);
1542
1543 slot = ac->ac_slot;
1544 if (sc->amr_busycmd[slot] == NULL)
1545 panic("amr: slot %d not busy?\n", slot);
1546
1547 sc->amr_busycmd[slot] = NULL;
1548 atomic_subtract_int(&sc->amr_busyslots, 1);
1549
1550 return (0);
1551}
1552
1553/********************************************************************************
1554 * Map/unmap (ac)'s data in the controller's addressable space as required.
1555 *
1556 * These functions may be safely called multiple times on a given command.
1557 */
1558static void
1559amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1560{
1561 struct amr_command *ac = (struct amr_command *)arg;
1562 struct amr_sgentry *sg;
1563 struct amr_sg64entry *sg64;
1564 int flags, i;
1565
1566 debug_called(3);
1567
1568 if (error)
1569 printf("amr_setup_sg: error %d\n", error);
1570
1571 /* get base address of s/g table */
1572 sg = ac->ac_sg.sg32;
1573 sg64 = ac->ac_sg.sg64;
1574
1575 if (AC_IS_SG64(ac)) {
1576 ac->ac_nsegments = nsegments;
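	/*
	 * With a 64-bit s/g list the 32-bit mailbox address is set to
	 * 0xffffffff (presumably a sentinel for the firmware); the real list
	 * address is supplied via ac_sg64_lo/hi when the command is issued.
	 */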
1577 ac->ac_mb_physaddr = 0xffffffff;
1578 for (i = 0; i < nsegments; i++, sg64++) {
1579 sg64->sg_addr = segs[i].ds_addr;
1580 sg64->sg_count = segs[i].ds_len;
1581 }
1582 } else {
1583 /* decide whether we need to populate the s/g table */
1584 if (nsegments < 2) {
1585 ac->ac_nsegments = 0;
1586 ac->ac_mb_physaddr = segs[0].ds_addr;
1587 } else {
1588 ac->ac_nsegments = nsegments;
1589 ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1590 for (i = 0; i < nsegments; i++, sg++) {
1591 sg->sg_addr = segs[i].ds_addr;
1592 sg->sg_count = segs[i].ds_len;
1593 }
1594 }
1595 }
1596
1597 flags = 0;
1598 if (ac->ac_flags & AMR_CMD_DATAIN)
1599 flags |= BUS_DMASYNC_PREREAD;
1600 if (ac->ac_flags & AMR_CMD_DATAOUT)
1601 flags |= BUS_DMASYNC_PREWRITE;
1602 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1603 ac->ac_flags |= AMR_CMD_MAPPED;
1604}
1605
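/*
 * Callback for bus_dmamap_load() on ordinary commands: record the s/g list
 * built by amr_setup_sg() in the mailbox and submit the command, requeueing
 * it if the controller is busy.
 */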
1606static void
1607amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1608{
1609 struct amr_command *ac = arg;
1610 struct amr_softc *sc = ac->ac_sc;
1611 int mb_channel;
1612
1613 amr_setup_sg(arg, segs, nsegs, err);
1614
1615 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1616 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1617 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1618 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1619 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1620 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1621
1622 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1623 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1624 if (AC_IS_SG64(ac)) {
1625 ac->ac_sg64_hi = 0;
1626 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1627 }
1628
1629 if (sc->amr_submit_command(ac) == EBUSY) {
1630 amr_freeslot(ac);
1631 amr_requeue_ready(ac);
1632 }
1633}
1634
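/*
 * Callback for bus_dmamap_load() on pass-through (CCB) commands: point the
 * mailbox at the pre-allocated CCB and store the s/g details in the CCB
 * itself before submitting.
 */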
1635static void
1636amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1637{
1638 struct amr_command *ac = arg;
1639 struct amr_softc *sc = ac->ac_sc;
1640 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1641 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1642
1643 /* Set up the mailbox portion of the command to point at the ccb */
1644 ac->ac_mailbox.mb_nsgelem = 0;
1645 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1646
1647 amr_setup_sg(arg, segs, nsegs, err);
1648
1649 switch (ac->ac_mailbox.mb_command) {
1650 case AMR_CMD_EXTPASS:
1651 aep->ap_no_sg_elements = ac->ac_nsegments;
1652 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1653 break;
1654 case AMR_CMD_PASS:
1655 ap->ap_no_sg_elements = ac->ac_nsegments;
1656 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1657 break;
1658 default:
1659 panic("Unknown ccb command");
1660 }
1661
1662 if (sc->amr_submit_command(ac) == EBUSY) {
1663 amr_freeslot(ac);
1664 amr_requeue_ready(ac);
1665 }
1666}
1667
1668static int
1669amr_mapcmd(struct amr_command *ac)
1670{
1671 bus_dmamap_callback_t *cb;
1672 struct amr_softc *sc = ac->ac_sc;
1673
1674 debug_called(3);
1675
1676 if (AC_IS_SG64(ac)) {
1677 ac->ac_tag = sc->amr_buffer64_dmat;
1678 ac->ac_datamap = ac->ac_dma64map;
1679 } else {
1680 ac->ac_tag = sc->amr_buffer_dmat;
1681 ac->ac_datamap = ac->ac_dmamap;
1682 }
1683
1684 if (ac->ac_flags & AMR_CMD_CCB)
1685 cb = amr_setup_ccb;
1686 else
1687 cb = amr_setup_data;
1688
1689 /* if the command involves data at all, and hasn't been mapped */
1690 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1691 /* map the data buffers into bus space and build the s/g list */
1692 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1693 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1694 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1695 }
1696 } else {
1697 if (sc->amr_submit_command(ac) == EBUSY) {
1698 amr_freeslot(ac);
1699 amr_requeue_ready(ac);
1700 }
1701 }
1702
1703 return (0);
1704}
1705
1706static void
1707amr_unmapcmd(struct amr_command *ac)
1708{
1709 int flag;
1710
1711 debug_called(3);
1712
1713 /* if the command involved data at all and was mapped */
1714 if (ac->ac_flags & AMR_CMD_MAPPED) {
1715
1716 if (ac->ac_data != NULL) {
1717
1718 flag = 0;
1719 if (ac->ac_flags & AMR_CMD_DATAIN)
1720 flag |= BUS_DMASYNC_POSTREAD;
1721 if (ac->ac_flags & AMR_CMD_DATAOUT)
1722 flag |= BUS_DMASYNC_POSTWRITE;
1723
1724 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1725 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1726 }
1727
1728 ac->ac_flags &= ~AMR_CMD_MAPPED;
1729 }
1730}
1731
1732/********************************************************************************
1733 * Take a command and give it to the controller, returns 0 if successful, or
1734 * EBUSY if the command should be retried later.
1735 */
1736static int
1737amr_start(struct amr_command *ac)
1738{
1739 struct amr_softc *sc;
1740 int error = 0;
1741 int slot;
1742
1743 debug_called(3);
1744
1745 /* mark command as busy so that polling consumer can tell */
1746 sc = ac->ac_sc;
1747 ac->ac_flags |= AMR_CMD_BUSY;
1748
1749 /* get a command slot (freed in amr_done) */
1750 slot = ac->ac_slot;
1751 if (sc->amr_busycmd[slot] != NULL)
1752 panic("amr: slot %d busy?\n", slot);
1753 sc->amr_busycmd[slot] = ac;
1754 atomic_add_int(&sc->amr_busyslots, 1);
1755
1756 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1757 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1758 /*
1759	 * Memory resources are short, so free the slot and let this be tried
1760 * later.
1761 */
1762 amr_freeslot(ac);
1763 }
1764
1765 return (error);
1766}
1767
1768/********************************************************************************
1769 * Extract one or more completed commands from the controller (sc)
1770 *
1771 * Returns nonzero if any commands on the work queue were marked as completed.
1772 */
1773
1774int
1775amr_done(struct amr_softc *sc)
1776{
1777 struct amr_command *ac;
1778 struct amr_mailbox mbox;
1779 int i, idx, result;
1780
1781 debug_called(3);
1782
1783 /* See if there's anything for us to do */
1784 result = 0;
1785
1786 /* loop collecting completed commands */
1787 for (;;) {
1788 /* poll for a completed command's identifier and status */
1789 if (sc->amr_get_work(sc, &mbox)) {
1790 result = 1;
1791
1792 /* iterate over completed commands in this result */
1793 for (i = 0; i < mbox.mb_nstatus; i++) {
1794 /* get pointer to busy command */
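		/* identifiers are slot numbers biased by one; see amr_quartz_submit_command() */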
1795 idx = mbox.mb_completed[i] - 1;
1796 ac = sc->amr_busycmd[idx];
1797
1798 /* really a busy command? */
1799 if (ac != NULL) {
1800
1801 /* pull the command from the busy index */
1802 amr_freeslot(ac);
1803
1804 /* save status for later use */
1805 ac->ac_status = mbox.mb_status;
1806 amr_enqueue_completed(ac);
1807 debug(3, "completed command with status %x", mbox.mb_status);
1808 } else {
1809 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1810 }
1811 }
1812 } else
1813 break; /* no work */
1814 }
1815
1816 /* handle completion and timeouts */
1817 amr_complete(sc, 0);
1818
1819 return(result);
1820}
1821
1822/********************************************************************************
1823 * Do completion processing on done commands on (sc)
1824 */
1825
1826static void
1827amr_complete(void *context, int pending)
1828{
1829 struct amr_softc *sc = (struct amr_softc *)context;
1830 struct amr_command *ac;
1831
1832 debug_called(3);
1833
1834 /* pull completed commands off the queue */
1835 for (;;) {
1836 ac = amr_dequeue_completed(sc);
1837 if (ac == NULL)
1838 break;
1839
1840 /* unmap the command's data buffer */
1841 amr_unmapcmd(ac);
1842
1843 /*
1844 * Is there a completion handler?
1845 */
1846 if (ac->ac_complete != NULL) {
1847 /* unbusy the command */
1848 ac->ac_flags &= ~AMR_CMD_BUSY;
1849 ac->ac_complete(ac);
1850
1851 /*
1852 * Is someone sleeping on this one?
1853 */
1854 } else {
1855 mtx_lock(&sc->amr_list_lock);
1856 ac->ac_flags &= ~AMR_CMD_BUSY;
1857 if (ac->ac_flags & AMR_CMD_SLEEP) {
1858 /* unbusy the command */
1859 wakeup(ac);
1860 }
1861 mtx_unlock(&sc->amr_list_lock);
1862 }
1863
1864 if(!sc->amr_busyslots) {
1865 wakeup(sc);
1866 }
1867 }
1868
1869 mtx_lock(&sc->amr_list_lock);
1870 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1871 amr_startio(sc);
1872 mtx_unlock(&sc->amr_list_lock);
1873}
1874
1875/********************************************************************************
1876 ********************************************************************************
1877 Command Buffer Management
1878 ********************************************************************************
1879 ********************************************************************************/
1880
1881/********************************************************************************
1882 * Get a new command buffer.
1883 *
1884 * This may return NULL in low-memory cases.
1885 *
1886 * If possible, we recycle a command buffer that's been used before.
1887 */
1888struct amr_command *
1889amr_alloccmd(struct amr_softc *sc)
1890{
1891 struct amr_command *ac;
1892
1893 debug_called(3);
1894
1895 ac = amr_dequeue_free(sc);
1896 if (ac == NULL) {
1897 amr_alloccmd_cluster(sc);
1898 ac = amr_dequeue_free(sc);
1899 }
1900 if (ac == NULL) {
1901 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1902 return(NULL);
1903 }
1904
1905 /* clear out significant fields */
1906 ac->ac_status = 0;
1907 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1908 ac->ac_flags = 0;
1909 ac->ac_bio = NULL;
1910 ac->ac_data = NULL;
1911 ac->ac_complete = NULL;
1912 ac->ac_tag = NULL;
1913 ac->ac_datamap = NULL;
1914 return(ac);
1915}
1916
1917/********************************************************************************
1918 * Release a command buffer for recycling.
1919 */
1920void
1921amr_releasecmd(struct amr_command *ac)
1922{
1923 debug_called(3);
1924
1925 amr_enqueue_free(ac);
1926}
1927
1928/********************************************************************************
1929 * Allocate a new command cluster and initialise it.
1930 */
1931static void
1932amr_alloccmd_cluster(struct amr_softc *sc)
1933{
1934 struct amr_command_cluster *acc;
1935 struct amr_command *ac;
1936 int i, nextslot;
1937
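    /* don't create more command slots than the adapter can have outstanding */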
1938 if (sc->amr_nextslot > sc->amr_maxio)
1939 return;
1940 acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1941 if (acc != NULL) {
1942 nextslot = sc->amr_nextslot;
1943 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1944 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1945 ac = &acc->acc_command[i];
1946 ac->ac_sc = sc;
1947 ac->ac_slot = nextslot;
1948
1949 /*
1950	     * The SG table for each slot is a fixed size and is assumed to
1951	     * hold 64-bit s/g objects when the driver is configured to do
1952 * 64-bit DMA. 32-bit DMA commands still use the same table, but
1953 * cast down to 32-bit objects.
1954 */
1955 if (AMR_IS_SG64(sc)) {
1956 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1957 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1958 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1959 } else {
1960 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1961 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1962 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1963 }
1964
1965 ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1966 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
1967 (ac->ac_slot * sizeof(union amr_ccb));
1968
1969 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
1970 break;
1971 if (AMR_IS_SG64(sc) &&
1972 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
1973 break;
1974 amr_releasecmd(ac);
1975 if (++nextslot > sc->amr_maxio)
1976 break;
1977 }
1978 sc->amr_nextslot = nextslot;
1979 }
1980}
1981
1982/********************************************************************************
1983 * Free a command cluster
1984 */
1985static void
1986amr_freecmd_cluster(struct amr_command_cluster *acc)
1987{
1988 struct amr_softc *sc = acc->acc_command[0].ac_sc;
1989 int i;
1990
1991 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1992 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
1993 if (AMR_IS_SG64(sc))
1994 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
1995 }
1996 free(acc, M_AMR);
1997}
1998
1999/********************************************************************************
2000 ********************************************************************************
2001 Interface-specific Shims
2002 ********************************************************************************
2003 ********************************************************************************/
2004
2005/********************************************************************************
2006 * Tell the controller that the mailbox contains a valid command
2007 */
2008static int
2009amr_quartz_submit_command(struct amr_command *ac)
2010{
2011 struct amr_softc *sc = ac->ac_sc;
2012 int i = 0;
2013
2014 mtx_lock(&sc->amr_hw_lock);
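    /* wait briefly for the mailbox to become free; if it stays busy, defer this command */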
2015 while (sc->amr_mailbox->mb_busy && (i++ < 10))
2016 DELAY(1);
2017 if (sc->amr_mailbox->mb_busy) {
2018 mtx_unlock(&sc->amr_hw_lock);
2019 return (EBUSY);
2020 }
2021
2022 /*
2023 * Save the slot number so that we can locate this command when complete.
2024 * Note that ident = 0 seems to be special, so we don't use it.
2025 */
2026    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;		/* will be copied into mbox */
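    /* copy only the command part of the mailbox; busy/poll/ack are set explicitly below */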
2027 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2028 sc->amr_mailbox->mb_busy = 1;
2029 sc->amr_mailbox->mb_poll = 0;
2030 sc->amr_mailbox->mb_ack = 0;
2031 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2032 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2033
2034 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2035 mtx_unlock(&sc->amr_hw_lock);
2036 return(0);
2037}
2038
2039static int
2040amr_std_submit_command(struct amr_command *ac)
2041{
2042 struct amr_softc *sc = ac->ac_sc;
2043
2044 mtx_lock(&sc->amr_hw_lock);
2045 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2046 mtx_unlock(&sc->amr_hw_lock);
2047 return (EBUSY);
2048 }
2049
2050 /*
2051 * Save the slot number so that we can locate this command when complete.
2052 * Note that ident = 0 seems to be special, so we don't use it.
2053 */
2054    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;		/* will be copied into mbox */
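    /* copy only the command part of the mailbox; busy/poll/ack are set explicitly below */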
2055 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2056 sc->amr_mailbox->mb_busy = 1;
2057 sc->amr_mailbox->mb_poll = 0;
2058 sc->amr_mailbox->mb_ack = 0;
2059
2060 AMR_SPOST_COMMAND(sc);
2061 mtx_unlock(&sc->amr_hw_lock);
2062 return(0);
2063}
2064
2065/********************************************************************************
2066 * Claim any work that the controller has completed; acknowledge completion,
2067 * save details of the completion in (mbsave)
2068 */
2069static int
2070amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2071{
2072 int worked, i;
2073 u_int32_t outd;
2074 u_int8_t nstatus;
2075 u_int8_t completed[46];
2076
2077 debug_called(3);
2078
2079 worked = 0;
2080
2081 /* work waiting for us? */
2082 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2083
2084 /* acknowledge interrupt */
2085 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2086
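	/* wait for the firmware to post the count of completed commands */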
2087 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2088 DELAY(1);
2089 sc->amr_mailbox->mb_nstatus = 0xff;
2090
2091 /* wait until fw wrote out all completions */
2092 for (i = 0; i < nstatus; i++) {
2093 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2094 DELAY(1);
2095 sc->amr_mailbox->mb_completed[i] = 0xff;
2096 }
2097
2098 /* Save information for later processing */
2099 mbsave->mb_nstatus = nstatus;
2100 mbsave->mb_status = sc->amr_mailbox->mb_status;
2101 sc->amr_mailbox->mb_status = 0xff;
2102
2103 for (i = 0; i < nstatus; i++)
2104 mbsave->mb_completed[i] = completed[i];
2105
2106 /* acknowledge that we have the commands */
2107 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2108
2109#if 0
2110#ifndef AMR_QUARTZ_GOFASTER
2111 /*
2112 * This waits for the controller to notice that we've taken the
2113 * command from it. It's very inefficient, and we shouldn't do it,
2114 * but if we remove this code, we stop completing commands under
2115 * load.
2116 *
2117 * Peter J says we shouldn't do this. The documentation says we
2118 * should. Who is right?
2119 */
2120 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2121 ; /* XXX aiee! what if it dies? */
2122#endif
2123#endif
2124
2125 worked = 1; /* got some work */
2126 }
2127
2128 return(worked);
2129}
2130
2131static int
2132amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2133{
2134 int worked;
2135 u_int8_t istat;
2136
2137 debug_called(3);
2138
2139 worked = 0;
2140
2141 /* check for valid interrupt status */
2142 istat = AMR_SGET_ISTAT(sc);
2143 if ((istat & AMR_SINTR_VALID) != 0) {
2144 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2145
2146 /* save mailbox, which contains a list of completed commands */
2147 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2148
2149 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2150 worked = 1;
2151 }
2152
2153 return(worked);
2154}
2155
2156/********************************************************************************
2157 * Notify the controller of the mailbox location.
2158 */
2159static void
2160amr_std_attach_mailbox(struct amr_softc *sc)
2161{
2162
2163 /* program the mailbox physical address */
2164 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2165 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2166 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2167 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2168 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2169
2170 /* clear any outstanding interrupt and enable interrupts proper */
2171 AMR_SACK_INTERRUPT(sc);
2172 AMR_SENABLE_INTR(sc);
2173}
2174
2175#ifdef AMR_BOARD_INIT
2176/********************************************************************************
2177 * Initialise the controller
2178 */
2179static int
2180amr_quartz_init(struct amr_softc *sc)
2181{
2182 int status, ostatus;
2183
2184 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2185
2186 AMR_QRESET(sc);
2187
2188 ostatus = 0xff;
2189 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2190 if (status != ostatus) {
2191 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2192 ostatus = status;
2193 }
2194 switch (status) {
2195 case AMR_QINIT_NOMEM:
2196 return(ENOMEM);
2197
2198 case AMR_QINIT_SCAN:
2199 /* XXX we could print channel/target here */
2200 break;
2201 }
2202 }
2203 return(0);
2204}
2205
2206static int
2207amr_std_init(struct amr_softc *sc)
2208{
2209 int status, ostatus;
2210
2211 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2212
2213 AMR_SRESET(sc);
2214
2215 ostatus = 0xff;
2216 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2217 if (status != ostatus) {
2218 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2219 ostatus = status;
2220 }
2221 switch (status) {
2222 case AMR_SINIT_NOMEM:
2223 return(ENOMEM);
2224
2225 case AMR_SINIT_INPROG:
2226 /* XXX we could print channel/target here? */
2227 break;
2228 }
2229 }
2230 return(0);
2231}
2232#endif
2233
2234/********************************************************************************
2235 ********************************************************************************
2236 Debugging
2237 ********************************************************************************
2238 ********************************************************************************/
2239
2240/********************************************************************************
2241 * Identify the controller and print some information about it.
2242 */
2243static void
2244amr_describe_controller(struct amr_softc *sc)
2245{
2246 struct amr_prodinfo *ap;
2247 struct amr_enquiry *ae;
2248 char *prod;
2249 int status;
2250
2251 /*
2252 * Try to get 40LD product info, which tells us what the card is labelled as.
2253 */
2254 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2255 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2256 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2257 ap->ap_memsize);
2258
2259 free(ap, M_AMR);
2260 return;
2261 }
2262
2263 /*
2264 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2265 */
2266 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2267 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2268
2269 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2270
2271 /*
2272 * Try to work it out based on the PCI signatures.
2273 */
2274 switch (pci_get_device(sc->amr_dev)) {
2275 case 0x9010:
2276 prod = "Series 428";
2277 break;
2278 case 0x9060:
2279 prod = "Series 434";
2280 break;
2281 default:
2282 prod = "unknown controller";
2283 break;
2284 }
2285 } else {
2286 device_printf(sc->amr_dev, "<unsupported controller>\n");
2287 return;
2288 }
2289
2290 /*
2291 * HP NetRaid controllers have a special encoding of the firmware and
2292     * BIOS versions.  The AMI firmware reports them as printable strings,
2293     * whereas the HP variant encodes each as an uppercase letter followed
2294     * by two binary numbers.
2295 */
2296
2297 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2298 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2299 ae->ae_adapter.aa_firmware[1] < ' ' &&
2300 ae->ae_adapter.aa_firmware[0] < ' ' &&
2301 ae->ae_adapter.aa_bios[2] >= 'A' &&
2302 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2303 ae->ae_adapter.aa_bios[1] < ' ' &&
2304 ae->ae_adapter.aa_bios[0] < ' ') {
2305
2306 /* this looks like we have an HP NetRaid version of the MegaRaid */
2307
2308 if(ae->ae_signature == AMR_SIG_438) {
2309 /* the AMI 438 is a NetRaid 3si in HP-land */
2310 prod = "HP NetRaid 3si";
2311 }
2312
2313 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2314 prod, ae->ae_adapter.aa_firmware[2],
2315 ae->ae_adapter.aa_firmware[1],
2316 ae->ae_adapter.aa_firmware[0],
2317 ae->ae_adapter.aa_bios[2],
2318 ae->ae_adapter.aa_bios[1],
2319 ae->ae_adapter.aa_bios[0],
2320 ae->ae_adapter.aa_memorysize);
2321 } else {
2322 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2323 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2324 ae->ae_adapter.aa_memorysize);
2325 }
2326 free(ae, M_AMR);
2327}
2328
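/********************************************************************************
 * Write (blks) blocks from (data) to logical drive (unit) starting at (lba),
 * using a polled command so that it works from contexts (such as the disk
 * dump path) where interrupts may not be available.
 */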
2329int
2330amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2331{
2332 struct amr_command *ac;
2333 int error = EIO;
2334
2335 debug_called(1);
2336
2337 sc->amr_state |= AMR_STATE_INTEN;
2338
2339 /* get ourselves a command buffer */
2340 if ((ac = amr_alloccmd(sc)) == NULL)
2341 goto out;
2342 /* set command flags */
2343 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2344
2345 /* point the command at our data */
2346 ac->ac_data = data;
2347 ac->ac_length = blks * AMR_BLKSIZE;
2348
2349 /* build the command proper */
2350 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2351 ac->ac_mailbox.mb_blkcount = blks;
2352 ac->ac_mailbox.mb_lba = lba;
2353 ac->ac_mailbox.mb_drive = unit;
2354
2355 /* can't assume that interrupts are going to work here, so play it safe */
2356 if (sc->amr_poll_command(ac))
2357 goto out;
2358 error = ac->ac_status;
2359
2360 out:
2361 if (ac != NULL)
2362 amr_releasecmd(ac);
2363
2364 sc->amr_state &= ~AMR_STATE_INTEN;
2365 return (error);
2366}
2367
2370#ifdef AMR_DEBUG
2371/********************************************************************************
2372 * Print the command (ac) in human-readable format
2373 */
2374#if 0
2375static void
2376amr_printcommand(struct amr_command *ac)
2377{
2378 struct amr_softc *sc = ac->ac_sc;
2379 struct amr_sgentry *sg;
2380 int i;
2381
2382 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2383 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2384 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2385 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2386 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2387 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2388 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2389 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2390
2391 /* get base address of s/g table */
2392 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2393 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2394 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
2395}
2396#endif
2397#endif