amr.c (revision 174194) -> amr.c (revision 174544)
1/*-
2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
43 * herein.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD: head/sys/dev/amr/amr.c 174194 2007-12-02 19:54:45Z scottl $");
59__FBSDID("$FreeBSD: head/sys/dev/amr/amr.c 174544 2007-12-12 05:55:03Z scottl $");
60
61/*
62 * Driver for the AMI MegaRaid family of controllers.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/kernel.h>
69#include <sys/proc.h>
70#include <sys/sysctl.h>
71
72#include <sys/bio.h>
73#include <sys/bus.h>
74#include <sys/conf.h>
75#include <sys/stat.h>
76
77#include <machine/bus.h>
78#include <machine/cpu.h>
79#include <machine/resource.h>
80#include <sys/rman.h>
81
82#include <dev/pci/pcireg.h>
83#include <dev/pci/pcivar.h>
84
85#include <dev/amr/amrio.h>
86#include <dev/amr/amrreg.h>
87#include <dev/amr/amrvar.h>
88#define AMR_DEFINE_TABLES
89#include <dev/amr/amr_tables.h>
90
91/*
92 * The CAM interface appears to be completely broken. Disable it.
93 */
94#ifndef AMR_ENABLE_CAM
95#define AMR_ENABLE_CAM 1
96#endif
97
98SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
99
100static d_open_t amr_open;
101static d_close_t amr_close;
102static d_ioctl_t amr_ioctl;
103
104static struct cdevsw amr_cdevsw = {
105 .d_version = D_VERSION,
106 .d_flags = D_NEEDGIANT,
107 .d_open = amr_open,
108 .d_close = amr_close,
109 .d_ioctl = amr_ioctl,
110 .d_name = "amr",
111};
112
113int linux_no_adapter = 0;
114/*
115 * Initialisation, bus interface.
116 */
117static void amr_startup(void *arg);
118
119/*
120 * Command wrappers
121 */
122static int amr_query_controller(struct amr_softc *sc);
123static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
124 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
125static void amr_completeio(struct amr_command *ac);
126static int amr_support_ext_cdb(struct amr_softc *sc);
127
128/*
129 * Command buffer allocation.
130 */
131static void amr_alloccmd_cluster(struct amr_softc *sc);
132static void amr_freecmd_cluster(struct amr_command_cluster *acc);
133
134/*
135 * Command processing.
136 */
137static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
138static int amr_wait_command(struct amr_command *ac) __unused;
139static int amr_mapcmd(struct amr_command *ac);
140static void amr_unmapcmd(struct amr_command *ac);
141static int amr_start(struct amr_command *ac);
142static void amr_complete(void *context, int pending);
143static void amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144static void amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145static void amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
143static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
146
147/*
148 * Status monitoring
149 */
150static void amr_periodic(void *data);
151
152/*
153 * Interface-specific shims
154 */
155static int amr_quartz_submit_command(struct amr_command *ac);
156static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
157static int amr_quartz_poll_command(struct amr_command *ac);
158static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
159
160static int amr_std_submit_command(struct amr_command *ac);
161static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
162static int amr_std_poll_command(struct amr_command *ac);
163static void amr_std_attach_mailbox(struct amr_softc *sc);
164
165#ifdef AMR_BOARD_INIT
166static int amr_quartz_init(struct amr_softc *sc);
167static int amr_std_init(struct amr_softc *sc);
168#endif
169
170/*
171 * Debugging
172 */
173static void amr_describe_controller(struct amr_softc *sc);
174#ifdef AMR_DEBUG
175#if 0
176static void amr_printcommand(struct amr_command *ac);
177#endif
178#endif
179
180static void amr_init_sysctl(struct amr_softc *sc);
181static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
182 int32_t flag, d_thread_t *td);
183
184MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
185
186/********************************************************************************
187 ********************************************************************************
188 Inline Glue
189 ********************************************************************************
190 ********************************************************************************/
191
192/********************************************************************************
193 ********************************************************************************
194 Public Interfaces
195 ********************************************************************************
196 ********************************************************************************/
197
198/********************************************************************************
199 * Initialise the controller and softc.
200 */
201int
202amr_attach(struct amr_softc *sc)
203{
204
205 debug_called(1);
206
207 /*
208 * Initialise per-controller queues.
209 */
210 TAILQ_INIT(&sc->amr_completed);
211 TAILQ_INIT(&sc->amr_freecmds);
212 TAILQ_INIT(&sc->amr_cmd_clusters);
213 TAILQ_INIT(&sc->amr_ready);
214 bioq_init(&sc->amr_bioq);
215
216 debug(2, "queue init done");
217
218 /*
219 * Configure for this controller type.
220 */
221 if (AMR_IS_QUARTZ(sc)) {
222 sc->amr_submit_command = amr_quartz_submit_command;
223 sc->amr_get_work = amr_quartz_get_work;
224 sc->amr_poll_command = amr_quartz_poll_command;
225 sc->amr_poll_command1 = amr_quartz_poll_command1;
226 } else {
227 sc->amr_submit_command = amr_std_submit_command;
228 sc->amr_get_work = amr_std_get_work;
229 sc->amr_poll_command = amr_std_poll_command;
230 amr_std_attach_mailbox(sc);
231 }
232
233#ifdef AMR_BOARD_INIT
234 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
235 return(ENXIO);
236#endif
237
238 /*
239 * Quiz controller for features and limits.
240 */
241 if (amr_query_controller(sc))
242 return(ENXIO);
243
244 debug(2, "controller query complete");
245
246 /*
247 * Setup sysctls.
248 */
249 amr_init_sysctl(sc);
250
251#if AMR_ENABLE_CAM != 0
252 /*
253 * Attach our 'real' SCSI channels to CAM.
254 */
255 if (amr_cam_attach(sc))
256 return(ENXIO);
257 debug(2, "CAM attach done");
258#endif
259
260 /*
261 * Create the control device.
262 */
263 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
264 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
265 sc->amr_dev_t->si_drv1 = sc;
266 linux_no_adapter++;
267 if (device_get_unit(sc->amr_dev) == 0)
268 make_dev_alias(sc->amr_dev_t, "megadev0");
269
270 /*
271 * Schedule ourselves to bring the controller up once interrupts are
272 * available.
273 */
274 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
275 sc->amr_ich.ich_func = amr_startup;
276 sc->amr_ich.ich_arg = sc;
277 if (config_intrhook_establish(&sc->amr_ich) != 0) {
278 device_printf(sc->amr_dev, "can't establish configuration hook\n");
279 return(ENOMEM);
280 }
281
282 /*
283 * Print a little information about the controller.
284 */
285 amr_describe_controller(sc);
286
287 debug(2, "attach complete");
288 return(0);
289}
290
291/********************************************************************************
292 * Locate disk resources and attach children to them.
293 */
294static void
295amr_startup(void *arg)
296{
297 struct amr_softc *sc = (struct amr_softc *)arg;
298 struct amr_logdrive *dr;
299 int i, error;
300
301 debug_called(1);
302
303 /* pull ourselves off the intrhook chain */
304 if (sc->amr_ich.ich_func)
305 config_intrhook_disestablish(&sc->amr_ich);
306 sc->amr_ich.ich_func = NULL;
307
308 /* get up-to-date drive information */
309 if (amr_query_controller(sc)) {
310 device_printf(sc->amr_dev, "can't scan controller for drives\n");
311 return;
312 }
313
314 /* iterate over available drives */
315 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
316 /* are we already attached to this drive? */
317 if (dr->al_disk == 0) {
318 /* generate geometry information */
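	    /* 0x200000 sectors is 1 GiB at 512 bytes/sector; larger volumes
	     * get extended 255-head/63-sector translation, smaller ones 64/32. */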
319 if (dr->al_size > 0x200000) { /* extended translation? */
320 dr->al_heads = 255;
321 dr->al_sectors = 63;
322 } else {
323 dr->al_heads = 64;
324 dr->al_sectors = 32;
325 }
326 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
327
328 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
329 if (dr->al_disk == 0)
330 device_printf(sc->amr_dev, "device_add_child failed\n");
331 device_set_ivars(dr->al_disk, dr);
332 }
333 }
334
335 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
336 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
337
338 /* mark controller back up */
339 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
340
341 /* interrupts will be enabled before we do anything more */
342 sc->amr_state |= AMR_STATE_INTEN;
343
344 /*
345 * Start the timeout routine.
346 */
347/* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
348
349 return;
350}
351
352static void
353amr_init_sysctl(struct amr_softc *sc)
354{
355
356 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
357 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
358 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
359 "");
360}
361
362
363/*******************************************************************************
364 * Free resources associated with a controller instance
365 */
366void
367amr_free(struct amr_softc *sc)
368{
369 struct amr_command_cluster *acc;
370
371#if AMR_ENABLE_CAM != 0
372 /* detach from CAM */
373 amr_cam_detach(sc);
374#endif
375
376 /* cancel status timeout */
377 untimeout(amr_periodic, sc, sc->amr_timeout);
378
379 /* throw away any command buffers */
380 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
381 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
382 amr_freecmd_cluster(acc);
383 }
384
385 /* destroy control device */
386 if( sc->amr_dev_t != (struct cdev *)NULL)
387 destroy_dev(sc->amr_dev_t);
388
389 if (mtx_initialized(&sc->amr_hw_lock))
390 mtx_destroy(&sc->amr_hw_lock);
391
392 if (mtx_initialized(&sc->amr_list_lock))
393 mtx_destroy(&sc->amr_list_lock);
394}
395
396/*******************************************************************************
397 * Receive a bio structure from a child device and queue it on a particular
398 * disk resource, then poke the disk resource to start as much work as it can.
399 */
400int
401amr_submit_bio(struct amr_softc *sc, struct bio *bio)
402{
403 debug_called(2);
404
405 mtx_lock(&sc->amr_list_lock);
406 amr_enqueue_bio(sc, bio);
407 amr_startio(sc);
408 mtx_unlock(&sc->amr_list_lock);
409 return(0);
410}
411
412/********************************************************************************
413 * Accept an open operation on the control device.
414 */
415static int
416amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
417{
418 int unit = minor(dev);
419 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
420
421 debug_called(1);
422
423 sc->amr_state |= AMR_STATE_OPEN;
424 return(0);
425}
426
427#ifdef LSI
428static int
429amr_del_ld(struct amr_softc *sc, int drv_no, int status)
430{
431
432 debug_called(1);
433
434 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
435 sc->amr_state &= ~AMR_STATE_LD_DELETE;
436 sc->amr_state |= AMR_STATE_REMAP_LD;
437 debug(1, "State Set");
438
439 if (!status) {
440 debug(1, "disk being destroyed %d", drv_no);
441 if (--amr_disks_registered == 0)
442 cdevsw_remove(&amrddisk_cdevsw);
443 debug(1, "disk destroyed successfully");
444 }
445 return 0;
446}
447
448static int
449amr_prepare_ld_delete(struct amr_softc *sc)
450{
451
452 debug_called(1);
453 if (sc->ld_del_supported == 0)
454 return(ENOIOCTL);
455
456 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
457 sc->amr_state |= AMR_STATE_LD_DELETE;
458
459 /* Allow up to a minute for all outstanding commands to be flushed. */
460 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
461 if ( sc->amr_busyslots )
462 return(ENOIOCTL);
463
464 return 0;
465}
466#endif
467
468/********************************************************************************
469 * Accept the last close on the control device.
470 */
471static int
472amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
473{
474 int unit = minor(dev);
475 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
476
477 debug_called(1);
478
479 sc->amr_state &= ~AMR_STATE_OPEN;
480 return (0);
481}
482
483/********************************************************************************
484 * Handle controller-specific control operations.
485 */
486static void
487amr_rescan_drives(struct cdev *dev)
488{
489 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
490 int i, error = 0;
491
492 sc->amr_state |= AMR_STATE_REMAP_LD;
493 while (sc->amr_busyslots) {
494 device_printf(sc->amr_dev, "idle controller\n");
495 amr_done(sc);
496 }
497
498 /* mark ourselves as in-shutdown */
499 sc->amr_state |= AMR_STATE_SHUTDOWN;
500
501 /* flush controller */
502 device_printf(sc->amr_dev, "flushing cache...");
503 printf("%s\n", amr_flush(sc) ? "failed" : "done");
504
505 /* delete all our child devices */
506 for(i = 0 ; i < AMR_MAXLD; i++) {
507 if(sc->amr_drive[i].al_disk != 0) {
508 if((error = device_delete_child(sc->amr_dev,
509 sc->amr_drive[i].al_disk)) != 0)
510 goto shutdown_out;
511
512 sc->amr_drive[i].al_disk = 0;
513 }
514 }
515
516shutdown_out:
517 amr_startup(sc);
518}
519
520int
521amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
522 d_thread_t *td)
523{
524 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
525 struct amr_command *ac;
526 struct amr_mailbox *mb;
527 struct amr_linux_ioctl ali;
528 void *dp, *temp;
529 int error;
530 int adapter, len, ac_flags = 0;
531 int logical_drives_changed = 0;
532 u_int32_t linux_version = 0x02100000;
533 u_int8_t status;
534 struct amr_passthrough *ap; /* 60 bytes */
535
536 error = 0;
537 dp = NULL;
538 ac = NULL;
539 ap = NULL;
540
541 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
542 return (error);
543 switch (ali.ui.fcs.opcode) {
544 case 0x82:
545 switch(ali.ui.fcs.subopcode) {
546 case 'e':
547 copyout(&linux_version, (void *)(uintptr_t)ali.data,
548 sizeof(linux_version));
549 error = 0;
550 break;
551
552 case 'm':
553 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
554 sizeof(linux_no_adapter));
555 td->td_retval[0] = linux_no_adapter;
556 error = 0;
557 break;
558
559 default:
560 printf("Unknown subopcode\n");
561 error = ENOIOCTL;
562 break;
563 }
564 break;
565
566 case 0x80:
567 case 0x81:
568 if (ali.ui.fcs.opcode == 0x80)
569 len = max(ali.outlen, ali.inlen);
570 else
571 len = ali.ui.fcs.length;
572
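	/* The Linux megaraid management interface apparently encodes the adapter
	 * number as (index | ('m' << 8)); the XOR below strips that magic byte. */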
573 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
574
575 ap = malloc(sizeof(struct amr_passthrough),
576 M_AMR, M_WAITOK | M_ZERO);
577
578 mb = (void *)&ali.mbox[0];
579
580 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
581 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
582 if (sc->amr_allow_vol_config == 0) {
583 error = EPERM;
584 break;
585 }
586 logical_drives_changed = 1;
587 }
588
589 if (ali.mbox[0] == AMR_CMD_PASS) {
575 mb = (void *)&ali.mbox[0];
576
577 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
578 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
579 if (sc->amr_allow_vol_config == 0) {
580 error = EPERM;
581 break;
582 }
583 logical_drives_changed = 1;
584 }
585
586 if (ali.mbox[0] == AMR_CMD_PASS) {
587 mtx_lock(&sc->amr_list_lock);
588 while ((ac = amr_alloccmd(sc)) == NULL)
589 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
590 mtx_unlock(&sc->amr_list_lock);
591 ap = &ac->ac_ccb->ccb_pthru;
592
590 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
591 sizeof(struct amr_passthrough));
592 if (error)
593 break;
594
595 if (ap->ap_data_transfer_length)
596 dp = malloc(ap->ap_data_transfer_length, M_AMR,
597 M_WAITOK | M_ZERO);
598
599 if (ali.inlen) {
600 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
601 dp, ap->ap_data_transfer_length);
602 if (error)
603 break;
604 }
605
593 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
594 sizeof(struct amr_passthrough));
595 if (error)
596 break;
597
598 if (ap->ap_data_transfer_length)
599 dp = malloc(ap->ap_data_transfer_length, M_AMR,
600 M_WAITOK | M_ZERO);
601
602 if (ali.inlen) {
603 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
604 dp, ap->ap_data_transfer_length);
605 if (error)
606 break;
607 }
608
606 mtx_lock(&sc->amr_list_lock);
607 while ((ac = amr_alloccmd(sc)) == NULL)
608 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
609
610 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
609 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
611 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
612 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
613 ac->ac_flags = ac_flags;
614
610 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
611 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
612 ac->ac_flags = ac_flags;
613
615 ac->ac_data = ap;
616 ac->ac_length = sizeof(struct amr_passthrough);
617 ac->ac_ccb_data = dp;
618 ac->ac_ccb_length = ap->ap_data_transfer_length;
614 ac->ac_data = dp;
615 ac->ac_length = ap->ap_data_transfer_length;
619 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
620
616 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
617
618 mtx_lock(&sc->amr_list_lock);
621 error = amr_wait_command(ac);
622 mtx_unlock(&sc->amr_list_lock);
623 if (error)
624 break;
625
626 status = ac->ac_status;
627 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
628 if (error)
629 break;
630
631 if (ali.outlen) {
632 error = copyout(dp, temp, ap->ap_data_transfer_length);
633 if (error)
634 break;
635 }
636 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
637 if (error)
638 break;
639
640 error = 0;
641 break;
642 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
643 printf("No AMR_CMD_PASS_64\n");
644 error = ENOIOCTL;
645 break;
646 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
647 printf("No AMR_CMD_EXTPASS\n");
648 error = ENOIOCTL;
649 break;
650 } else {
651 if (len)
652 dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
653
654 if (ali.inlen) {
655 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
656 if (error)
657 break;
658 }
659
660 mtx_lock(&sc->amr_list_lock);
661 while ((ac = amr_alloccmd(sc)) == NULL)
662 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
663
664 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
665 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
666 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
667
668 ac->ac_length = len;
669 ac->ac_data = dp;
670 ac->ac_flags = ac_flags;
671
672 error = amr_wait_command(ac);
673 mtx_unlock(&sc->amr_list_lock);
674 if (error)
675 break;
676
677 status = ac->ac_status;
678 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
679 if (ali.outlen) {
680 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
681 if (error)
682 break;
683 }
684
685 error = 0;
686 if (logical_drives_changed)
687 amr_rescan_drives(dev);
688 break;
689 }
690 break;
691
692 default:
693 debug(1, "unknown linux ioctl 0x%lx", cmd);
694 printf("unknown linux ioctl 0x%lx\n", cmd);
695 error = ENOIOCTL;
696 break;
697 }
698
699 /*
700 * At this point, we know that there is a lock held and that these
701 * objects have been allocated.
702 */
703 mtx_lock(&sc->amr_list_lock);
704 if (ac != NULL)
705 amr_releasecmd(ac);
706 mtx_unlock(&sc->amr_list_lock);
707 if (dp != NULL)
708 free(dp, M_AMR);
619 error = amr_wait_command(ac);
620 mtx_unlock(&sc->amr_list_lock);
621 if (error)
622 break;
623
624 status = ac->ac_status;
625 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
626 if (error)
627 break;
628
629 if (ali.outlen) {
630 error = copyout(dp, temp, ap->ap_data_transfer_length);
631 if (error)
632 break;
633 }
634 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
635 if (error)
636 break;
637
638 error = 0;
639 break;
640 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
641 printf("No AMR_CMD_PASS_64\n");
642 error = ENOIOCTL;
643 break;
644 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
645 printf("No AMR_CMD_EXTPASS\n");
646 error = ENOIOCTL;
647 break;
648 } else {
649 if (len)
650 dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
651
652 if (ali.inlen) {
653 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
654 if (error)
655 break;
656 }
657
658 mtx_lock(&sc->amr_list_lock);
659 while ((ac = amr_alloccmd(sc)) == NULL)
660 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
661
662 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
663 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
664 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
665
666 ac->ac_length = len;
667 ac->ac_data = dp;
668 ac->ac_flags = ac_flags;
669
670 error = amr_wait_command(ac);
671 mtx_unlock(&sc->amr_list_lock);
672 if (error)
673 break;
674
675 status = ac->ac_status;
676 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
677 if (ali.outlen) {
678 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
679 if (error)
680 break;
681 }
682
683 error = 0;
684 if (logical_drives_changed)
685 amr_rescan_drives(dev);
686 break;
687 }
688 break;
689
690 default:
691 debug(1, "unknown linux ioctl 0x%lx", cmd);
692 printf("unknown linux ioctl 0x%lx\n", cmd);
693 error = ENOIOCTL;
694 break;
695 }
696
697 /*
698 * At this point, we know that there is a lock held and that these
699 * objects have been allocated.
700 */
701 mtx_lock(&sc->amr_list_lock);
702 if (ac != NULL)
703 amr_releasecmd(ac);
704 mtx_unlock(&sc->amr_list_lock);
705 if (dp != NULL)
706 free(dp, M_AMR);
709 if (ap != NULL)
710 free(ap, M_AMR);
711 return(error);
712}
713
714static int
715amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
716{
717 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
718 union {
719 void *_p;
720 struct amr_user_ioctl *au;
721#ifdef AMR_IO_COMMAND32
722 struct amr_user_ioctl32 *au32;
723#endif
724 int *result;
725 } arg;
726 struct amr_command *ac;
727 struct amr_mailbox_ioctl *mbi;
728 void *dp, *au_buffer;
729 unsigned long au_length;
730 unsigned char *au_cmd;
731 int *au_statusp, au_direction;
707 return(error);
708}
709
710static int
711amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
712{
713 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
714 union {
715 void *_p;
716 struct amr_user_ioctl *au;
717#ifdef AMR_IO_COMMAND32
718 struct amr_user_ioctl32 *au32;
719#endif
720 int *result;
721 } arg;
722 struct amr_command *ac;
723 struct amr_mailbox_ioctl *mbi;
724 void *dp, *au_buffer;
725 unsigned long au_length;
726 unsigned char *au_cmd;
727 int *au_statusp, au_direction;
732 int error, ac_flags = 0;
728 int error;
733 struct amr_passthrough *ap; /* 60 bytes */
734 int logical_drives_changed = 0;
735
736 debug_called(1);
737
738 arg._p = (void *)addr;
739
740 error = 0;
741 dp = NULL;
742 ac = NULL;
743 ap = NULL;
744
745 switch(cmd) {
746
747 case AMR_IO_VERSION:
748 debug(1, "AMR_IO_VERSION");
749 *arg.result = AMR_IO_VERSION_NUMBER;
750 return(0);
751
752#ifdef AMR_IO_COMMAND32
753 /*
754 * Accept ioctl-s from 32-bit binaries on non-32-bit
755 * platforms, such as AMD. LSI's MEGAMGR utility is
756 * the only example known today... -mi
757 */
758 case AMR_IO_COMMAND32:
759 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
760 au_cmd = arg.au32->au_cmd;
761 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
762 au_length = arg.au32->au_length;
763 au_direction = arg.au32->au_direction;
764 au_statusp = &arg.au32->au_status;
765 break;
766#endif
767
768 case AMR_IO_COMMAND:
769 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
770 au_cmd = arg.au->au_cmd;
771 au_buffer = (void *)arg.au->au_buffer;
772 au_length = arg.au->au_length;
773 au_direction = arg.au->au_direction;
774 au_statusp = &arg.au->au_status;
775 break;
776
777 case 0xc0046d00:
778 case 0xc06e6d00: /* Linux emulation */
779 {
780 devclass_t devclass;
781 struct amr_linux_ioctl ali;
782 int adapter, error;
783
784 devclass = devclass_find("amr");
785 if (devclass == NULL)
786 return (ENOENT);
787
788 error = copyin(addr, &ali, sizeof(ali));
789 if (error)
790 return (error);
791 if (ali.ui.fcs.opcode == 0x82)
792 adapter = 0;
793 else
794 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
795
796 sc = devclass_get_softc(devclass, adapter);
797 if (sc == NULL)
798 return (ENOENT);
799
800 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd,
801 addr, 0, td));
802 }
803 default:
804 debug(1, "unknown ioctl 0x%lx", cmd);
805 return(ENOIOCTL);
806 }
807
808 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
809 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
810 if (sc->amr_allow_vol_config == 0) {
811 error = EPERM;
812 goto out;
813 }
814 logical_drives_changed = 1;
815#ifdef LSI
816 if ((error = amr_prepare_ld_delete(sc)) != 0)
817 return (error);
818#endif
819 }
820
821 /* handle inbound data buffer */
822 if (au_length != 0 && au_cmd[0] != 0x06) {
823 if ((dp = malloc(au_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
824 error = ENOMEM;
825 goto out;
826 }
827 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
828 free(dp, M_AMR);
829 return (error);
830 }
831 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
832 }
833
834 /* Allocate this now before the mutex gets held */
729 struct amr_passthrough *ap; /* 60 bytes */
730 int logical_drives_changed = 0;
731
732 debug_called(1);
733
734 arg._p = (void *)addr;
735
736 error = 0;
737 dp = NULL;
738 ac = NULL;
739 ap = NULL;
740
741 switch(cmd) {
742
743 case AMR_IO_VERSION:
744 debug(1, "AMR_IO_VERSION");
745 *arg.result = AMR_IO_VERSION_NUMBER;
746 return(0);
747
748#ifdef AMR_IO_COMMAND32
749 /*
750 * Accept ioctl-s from 32-bit binaries on non-32-bit
751 * platforms, such as AMD. LSI's MEGAMGR utility is
752 * the only example known today... -mi
753 */
754 case AMR_IO_COMMAND32:
755 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
756 au_cmd = arg.au32->au_cmd;
757 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
758 au_length = arg.au32->au_length;
759 au_direction = arg.au32->au_direction;
760 au_statusp = &arg.au32->au_status;
761 break;
762#endif
763
764 case AMR_IO_COMMAND:
765 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
766 au_cmd = arg.au->au_cmd;
767 au_buffer = (void *)arg.au->au_buffer;
768 au_length = arg.au->au_length;
769 au_direction = arg.au->au_direction;
770 au_statusp = &arg.au->au_status;
771 break;
772
773 case 0xc0046d00:
774 case 0xc06e6d00: /* Linux emulation */
775 {
776 devclass_t devclass;
777 struct amr_linux_ioctl ali;
778 int adapter, error;
779
780 devclass = devclass_find("amr");
781 if (devclass == NULL)
782 return (ENOENT);
783
784 error = copyin(addr, &ali, sizeof(ali));
785 if (error)
786 return (error);
787 if (ali.ui.fcs.opcode == 0x82)
788 adapter = 0;
789 else
790 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
791
792 sc = devclass_get_softc(devclass, adapter);
793 if (sc == NULL)
794 return (ENOENT);
795
796 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd,
797 addr, 0, td));
798 }
799 default:
800 debug(1, "unknown ioctl 0x%lx", cmd);
801 return(ENOIOCTL);
802 }
803
804 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
805 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
806 if (sc->amr_allow_vol_config == 0) {
807 error = EPERM;
808 goto out;
809 }
810 logical_drives_changed = 1;
811#ifdef LSI
812 if ((error = amr_prepare_ld_delete(sc)) != 0)
813 return (error);
814#endif
815 }
816
817 /* handle inbound data buffer */
818 if (au_length != 0 && au_cmd[0] != 0x06) {
819 if ((dp = malloc(au_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
820 error = ENOMEM;
821 goto out;
822 }
823 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
824 free(dp, M_AMR);
825 return (error);
826 }
827 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
828 }
829
830 /* Allocate this now before the mutex gets held */
835 if (au_cmd[0] == AMR_CMD_PASS)
836 ap = malloc(sizeof(struct amr_passthrough), M_AMR, M_WAITOK|M_ZERO);
837
838 mtx_lock(&sc->amr_list_lock);
839 while ((ac = amr_alloccmd(sc)) == NULL)
840 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
841
842 /* handle SCSI passthrough command */
843 if (au_cmd[0] == AMR_CMD_PASS) {
844 int len;
845
831
832 mtx_lock(&sc->amr_list_lock);
833 while ((ac = amr_alloccmd(sc)) == NULL)
834 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
835
836 /* handle SCSI passthrough command */
837 if (au_cmd[0] == AMR_CMD_PASS) {
838 int len;
839
840 ap = &ac->ac_ccb->ccb_pthru;
841 bzero(ap, sizeof(struct amr_passthrough));
842
846 /* copy cdb */
847 len = au_cmd[2];
848 ap->ap_cdb_length = len;
849 bcopy(au_cmd + 3, ap->ap_cdb, len);
850
851 /* build passthrough */
852 ap->ap_timeout = au_cmd[len + 3] & 0x07;
853 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
854 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
855 ap->ap_logical_drive_no = au_cmd[len + 4];
856 ap->ap_channel = au_cmd[len + 5];
857 ap->ap_scsi_id = au_cmd[len + 6];
858 ap->ap_request_sense_length = 14;
859 ap->ap_data_transfer_length = au_length;
860 /* XXX what about the request-sense area? does the caller want it? */
861
862 /* build command */
843 /* copy cdb */
844 len = au_cmd[2];
845 ap->ap_cdb_length = len;
846 bcopy(au_cmd + 3, ap->ap_cdb, len);
847
848 /* build passthrough */
849 ap->ap_timeout = au_cmd[len + 3] & 0x07;
850 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
851 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
852 ap->ap_logical_drive_no = au_cmd[len + 4];
853 ap->ap_channel = au_cmd[len + 5];
854 ap->ap_scsi_id = au_cmd[len + 6];
855 ap->ap_request_sense_length = 14;
856 ap->ap_data_transfer_length = au_length;
857 /* XXX what about the request-sense area? does the caller want it? */
858
859 /* build command */
863 ac->ac_data = ap;
864 ac->ac_length = sizeof(struct amr_passthrough);
865 ac->ac_ccb_data = dp;
866 ac->ac_ccb_length = au_length;
867
868 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
860 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
869 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
861 ac->ac_flags = AMR_CMD_CCB;
870
871 } else {
872 /* direct command to controller */
873 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
874
875 /* copy pertinent mailbox items */
876 mbi->mb_command = au_cmd[0];
877 mbi->mb_channel = au_cmd[1];
878 mbi->mb_param = au_cmd[2];
879 mbi->mb_pad[0] = au_cmd[3];
880 mbi->mb_drive = au_cmd[4];
862
863 } else {
864 /* direct command to controller */
865 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
866
867 /* copy pertinent mailbox items */
868 mbi->mb_command = au_cmd[0];
869 mbi->mb_channel = au_cmd[1];
870 mbi->mb_param = au_cmd[2];
871 mbi->mb_pad[0] = au_cmd[3];
872 mbi->mb_drive = au_cmd[4];
881
882 /* build the command */
883 ac->ac_data = dp;
884 ac->ac_length = au_length;
885 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
873 ac->ac_flags = 0;
886 }
887
874 }
875
888 ac->ac_flags = ac_flags;
876 /* build the command */
877 ac->ac_data = dp;
878 ac->ac_length = au_length;
879 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
889
890 /* run the command */
891 error = amr_wait_command(ac);
892 mtx_unlock(&sc->amr_list_lock);
893 if (error)
894 goto out;
895
896 /* copy out data and set status */
897 if (au_length != 0) {
898 error = copyout(dp, au_buffer, au_length);
899 }
900 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
901 if (dp != NULL)
880
881 /* run the command */
882 error = amr_wait_command(ac);
883 mtx_unlock(&sc->amr_list_lock);
884 if (error)
885 goto out;
886
887 /* copy out data and set status */
888 if (au_length != 0) {
889 error = copyout(dp, au_buffer, au_length);
890 }
891 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
892 if (dp != NULL)
902 debug(2, "%jd", (uintptr_t)dp);
893 debug(2, "%p status 0x%x", dp, ac->ac_status);
903 *au_statusp = ac->ac_status;
904
905out:
906 /*
907 * At this point, we know that there is a lock held and that these
908 * objects have been allocated.
909 */
910 mtx_lock(&sc->amr_list_lock);
911 if (ac != NULL)
912 amr_releasecmd(ac);
913 mtx_unlock(&sc->amr_list_lock);
914 if (dp != NULL)
915 free(dp, M_AMR);
894 *au_statusp = ac->ac_status;
895
896out:
897 /*
898 * At this point, we know that there is a lock held and that these
899 * objects have been allocated.
900 */
901 mtx_lock(&sc->amr_list_lock);
902 if (ac != NULL)
903 amr_releasecmd(ac);
904 mtx_unlock(&sc->amr_list_lock);
905 if (dp != NULL)
906 free(dp, M_AMR);
916 if (ap != NULL)
917 free(ap, M_AMR);
918
919#ifndef LSI
920 if (logical_drives_changed)
921 amr_rescan_drives(dev);
922#endif
923
924 return(error);
925}
926
927/********************************************************************************
928 ********************************************************************************
929 Status Monitoring
930 ********************************************************************************
931 ********************************************************************************/
932
933/********************************************************************************
934 * Perform a periodic check of the controller status
935 */
936static void
937amr_periodic(void *data)
938{
939 struct amr_softc *sc = (struct amr_softc *)data;
940
941 debug_called(2);
942
943 /* XXX perform periodic status checks here */
944
945 /* compensate for missed interrupts */
946 amr_done(sc);
947
948 /* reschedule */
949 sc->amr_timeout = timeout(amr_periodic, sc, hz);
950}
951
952/********************************************************************************
953 ********************************************************************************
954 Command Wrappers
955 ********************************************************************************
956 ********************************************************************************/
957
958/********************************************************************************
959 * Interrogate the controller for the operational parameters we require.
960 */
961static int
962amr_query_controller(struct amr_softc *sc)
963{
964 struct amr_enquiry3 *aex;
965 struct amr_prodinfo *ap;
966 struct amr_enquiry *ae;
967 int ldrv;
968 int status;
969
970 /*
971 * If we haven't found the real limit yet, let us have a couple of commands in
972 * order to be able to probe.
973 */
974 if (sc->amr_maxio == 0)
975 sc->amr_maxio = 2;
976
977 /*
978 * Greater than 10 byte cdb support
979 */
980 sc->support_ext_cdb = amr_support_ext_cdb(sc);
981
982 if(sc->support_ext_cdb) {
983 debug(2,"supports extended CDBs.");
984 }
985
986 /*
987 * Try to issue an ENQUIRY3 command
988 */
989 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
990 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
991
992 /*
993 * Fetch current state of logical drives.
994 */
995 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
996 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
997 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
998 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
999 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1000 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1001 }
1002 free(aex, M_AMR);
1003
1004 /*
1005 * Get product info for channel count.
1006 */
1007 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1008 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
1009 return(1);
1010 }
1011 sc->amr_maxdrives = 40;
1012 sc->amr_maxchan = ap->ap_nschan;
1013 sc->amr_maxio = ap->ap_maxio;
1014 sc->amr_type |= AMR_TYPE_40LD;
1015 free(ap, M_AMR);
1016
1017 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1018 if (ap != NULL)
1019 free(ap, M_AMR);
1020 if (!status) {
1021 sc->amr_ld_del_supported = 1;
1022 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1023 }
1024 } else {
1025
1026 /* failed, try the 8LD ENQUIRY commands */
1027 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1028 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1029 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1030 return(1);
1031 }
1032 ae->ae_signature = 0;
1033 }
1034
1035 /*
1036 * Fetch current state of logical drives.
1037 */
1038 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1039 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1040 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1041 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1042 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1043 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1044 }
1045
1046 sc->amr_maxdrives = 8;
1047 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1048 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1049 free(ae, M_AMR);
1050 }
1051
1052 /*
1053 * Mark remaining drives as unused.
1054 */
1055 for (; ldrv < AMR_MAXLD; ldrv++)
1056 sc->amr_drive[ldrv].al_size = 0xffffffff;
1057
1058 /*
1059 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1060     * the controller's reported value, and lockups have been seen when we trust it.
1061 */
1062 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1063
1064 return(0);
1065}
1066
1067/********************************************************************************
1068 * Run a generic enquiry-style command.
1069 */
1070static void *
1071amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1072{
1073 struct amr_command *ac;
1074 void *result;
1075 u_int8_t *mbox;
1076 int error;
1077
1078 debug_called(1);
1079
1080 error = 1;
1081 result = NULL;
1082
1083 /* get ourselves a command buffer */
1084 mtx_lock(&sc->amr_list_lock);
1085 ac = amr_alloccmd(sc);
1086 mtx_unlock(&sc->amr_list_lock);
1087 if (ac == NULL)
1088 goto out;
1089 /* allocate the response structure */
1090 if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1091 goto out;
1092 /* set command flags */
1093
1094 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1095
1096 /* point the command at our data */
1097 ac->ac_data = result;
1098 ac->ac_length = bufsize;
1099
1100 /* build the command proper */
1101 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1102 mbox[0] = cmd;
1103 mbox[2] = cmdsub;
1104 mbox[3] = cmdqual;
1105 *status = 0;
1106
1107 /* can't assume that interrupts are going to work here, so play it safe */
1108 if (sc->amr_poll_command(ac))
1109 goto out;
1110 error = ac->ac_status;
1111 *status = ac->ac_status;
1112
1113 out:
1114 mtx_lock(&sc->amr_list_lock);
1115 if (ac != NULL)
1116 amr_releasecmd(ac);
1117 mtx_unlock(&sc->amr_list_lock);
1118 if ((error != 0) && (result != NULL)) {
1119 free(result, M_AMR);
1120 result = NULL;
1121 }
1122 return(result);
1123}
1124
1125/********************************************************************************
1126 * Flush the controller's internal cache, return status.
1127 */
1128int
1129amr_flush(struct amr_softc *sc)
1130{
1131 struct amr_command *ac;
1132 int error;
1133
1134 /* get ourselves a command buffer */
1135 error = 1;
1136 mtx_lock(&sc->amr_list_lock);
1137 ac = amr_alloccmd(sc);
1138 mtx_unlock(&sc->amr_list_lock);
1139 if (ac == NULL)
1140 goto out;
1141 /* set command flags */
1142 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1143
1144 /* build the command proper */
1145 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1146
1147 /* we have to poll, as the system may be going down or otherwise damaged */
1148 if (sc->amr_poll_command(ac))
1149 goto out;
1150 error = ac->ac_status;
1151
1152 out:
1153 mtx_lock(&sc->amr_list_lock);
1154 if (ac != NULL)
1155 amr_releasecmd(ac);
1156 mtx_unlock(&sc->amr_list_lock);
1157 return(error);
1158}
1159
1160/********************************************************************************
1161 * Detect extended CDB (greater than 10 byte CDB) support.
1162 * Returns 1 if the controller supports extended CDBs,
1163 * 0 if it does not.
1164 */
1165static int
1166amr_support_ext_cdb(struct amr_softc *sc)
1167{
1168 struct amr_command *ac;
1169 u_int8_t *mbox;
1170 int error;
1171
1172 /* get ourselves a command buffer */
1173 error = 0;
1174 mtx_lock(&sc->amr_list_lock);
1175 ac = amr_alloccmd(sc);
1176 mtx_unlock(&sc->amr_list_lock);
1177 if (ac == NULL)
1178 goto out;
1179 /* set command flags */
1180 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1181
1182 /* build the command proper */
1183 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1184 mbox[0] = 0xA4;
1185 mbox[2] = 0x16;
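    /*
     * NB: 0xA4/0x16 appear to be raw vendor firmware opcode bytes for the
     * extended-CDB capability query; they are used here verbatim rather
     * than through symbolic constants.
     */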
1186
1187
1188 /* we have to poll, as the system may be going down or otherwise damaged */
1189 if (sc->amr_poll_command(ac))
1190 goto out;
1191 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1192 error = 1;
1193 }
1194
1195out:
1196 mtx_lock(&sc->amr_list_lock);
1197 if (ac != NULL)
1198 amr_releasecmd(ac);
1199 mtx_unlock(&sc->amr_list_lock);
1200 return(error);
1201}
1202
1203/********************************************************************************
1204 * Try to find I/O work for the controller from one or more of the work queues.
1205 *
1206 * We make the assumption that if the controller is not ready to take a command
1207 * at some given time, it will generate an interrupt at some later time when
1208 * it is.
1209 */
1210void
1211amr_startio(struct amr_softc *sc)
1212{
1213 struct amr_command *ac;
1214
1215 /* spin until something prevents us from doing any work */
1216 for (;;) {
1217
1218	/* Don't bother to queue commands if no bounce buffers are available. */
1219 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1220 break;
1221
1222 /* try to get a ready command */
1223 ac = amr_dequeue_ready(sc);
1224
1225 /* if that failed, build a command from a bio */
1226 if (ac == NULL)
1227 (void)amr_bio_command(sc, &ac);
1228
1229#if AMR_ENABLE_CAM != 0
1230 /* if that failed, build a command from a ccb */
1231 if (ac == NULL)
1232 (void)amr_cam_command(sc, &ac);
1233#endif
1234
1235 /* if we don't have anything to do, give up */
1236 if (ac == NULL)
1237 break;
1238
1239 /* try to give the command to the controller; if this fails save it for later and give up */
1240 if (amr_start(ac)) {
1241 debug(2, "controller busy, command deferred");
1242 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1243 break;
1244 }
1245 }
1246}
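/*
 * A minimal sketch of how a producer would hand new work to the dispatcher
 * above, assuming the usual amr_enqueue_bio() helper that pairs with the
 * amr_dequeue_bio() call used in amr_startio() (the real producers, the
 * disk strategy routine and the CAM glue, live outside this section):
 *
 *	mtx_lock(&sc->amr_list_lock);
 *	amr_enqueue_bio(sc, bio);
 *	amr_startio(sc);
 *	mtx_unlock(&sc->amr_list_lock);
 */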
1247
1248/********************************************************************************
1249 * Handle completion of an I/O command.
1250 */
1251static void
1252amr_completeio(struct amr_command *ac)
1253{
1254 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
1255 static struct timeval lastfail;
1256 static int curfail;
1257
1258 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1259 ac->ac_bio->bio_error = EIO;
1260 ac->ac_bio->bio_flags |= BIO_ERROR;
1261
1262 if (ppsratecheck(&lastfail, &curfail, 1))
1263 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1264/* amr_printcommand(ac);*/
1265 }
1266 amrd_intr(ac->ac_bio);
1267 mtx_lock(&ac->ac_sc->amr_list_lock);
1268 amr_releasecmd(ac);
1269 mtx_unlock(&ac->ac_sc->amr_list_lock);
1270}
1271
1272/********************************************************************************
1273 ********************************************************************************
1274 Command Processing
1275 ********************************************************************************
1276 ********************************************************************************/
1277
1278/********************************************************************************
1279 * Convert a bio off the top of the bio queue into a command.
1280 */
1281static int
1282amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1283{
1284 struct amr_command *ac;
1285 struct amrd_softc *amrd;
1286 struct bio *bio;
1287 int error;
1288 int blkcount;
1289 int driveno;
1290 int cmd;
1291
1292 *acp = NULL;
1293 error = 0;
1294
1295 /* get a command */
1296 if ((ac = amr_alloccmd(sc)) == NULL)
1297 return (ENOMEM);
1298
1299 /* get a bio to work on */
1300 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1301 amr_releasecmd(ac);
1302 return (0);
1303 }
1304
1305 /* connect the bio to the command */
1306 ac->ac_complete = amr_completeio;
1307 ac->ac_bio = bio;
1308 ac->ac_data = bio->bio_data;
1309 ac->ac_length = bio->bio_bcount;
1310 cmd = 0;
1311 switch (bio->bio_cmd) {
1312 case BIO_READ:
1313 ac->ac_flags |= AMR_CMD_DATAIN;
1314 if (AMR_IS_SG64(sc)) {
1315 cmd = AMR_CMD_LREAD64;
1316 ac->ac_flags |= AMR_CMD_SG64;
1317 } else
1318 cmd = AMR_CMD_LREAD;
1319 break;
1320 case BIO_WRITE:
1321 ac->ac_flags |= AMR_CMD_DATAOUT;
1322 if (AMR_IS_SG64(sc)) {
1323 cmd = AMR_CMD_LWRITE64;
1324 ac->ac_flags |= AMR_CMD_SG64;
1325 } else
1326 cmd = AMR_CMD_LWRITE;
1327 break;
1328 case BIO_FLUSH:
1329 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1330 cmd = AMR_CMD_FLUSH;
1331 break;
1332 }
1333 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1334 driveno = amrd->amrd_drive - sc->amr_drive;
1335 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
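    /* (round the byte count up to a whole number of AMR_BLKSIZE blocks) */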
1336
1337 ac->ac_mailbox.mb_command = cmd;
1338 if (bio->bio_cmd & (BIO_READ|BIO_WRITE)) {
1339 ac->ac_mailbox.mb_blkcount = blkcount;
1340 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1341 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1342 device_printf(sc->amr_dev,
1343 "I/O beyond end of unit (%lld,%d > %lu)\n",
1344 (long long)bio->bio_pblkno, blkcount,
1345 (u_long)sc->amr_drive[driveno].al_size);
1346 }
1347 }
1348 ac->ac_mailbox.mb_drive = driveno;
1349 if (sc->amr_state & AMR_STATE_REMAP_LD)
1350 ac->ac_mailbox.mb_drive |= 0x80;
1351
1352 /* we fill in the s/g related data when the command is mapped */
1353
1354 *acp = ac;
1355 return(error);
1356}
1357
1358/********************************************************************************
1359 * Take a command, submit it to the controller and sleep until it completes
1360 * or fails.  Interrupts must be enabled; returns nonzero on error.
1361 */
1362static int
1363amr_wait_command(struct amr_command *ac)
1364{
1365 int error = 0;
1366 struct amr_softc *sc = ac->ac_sc;
1367
1368 debug_called(1);
1369
1370 ac->ac_complete = NULL;
1371 ac->ac_flags |= AMR_CMD_SLEEP;
1372 if ((error = amr_start(ac)) != 0) {
1373 return(error);
1374 }
1375
1376 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1377 error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1378 }
1379
1380 return(error);
1381}
1382
1383/********************************************************************************
1384 * Take a command, submit it to the controller and busy-wait for it to return.
1385 * Returns nonzero on error. Can be safely called with interrupts enabled.
1386 */
1387static int
1388amr_std_poll_command(struct amr_command *ac)
1389{
1390 struct amr_softc *sc = ac->ac_sc;
1391 int error, count;
1392
1393 debug_called(2);
1394
1395 ac->ac_complete = NULL;
1396 if ((error = amr_start(ac)) != 0)
1397 return(error);
1398
1399 count = 0;
1400 do {
1401 /*
1402 * Poll for completion, although the interrupt handler may beat us to it.
1403 * Note that the timeout here is somewhat arbitrary.
1404 */
1405 amr_done(sc);
1406 DELAY(1000);
1407 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
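    /* (1000 iterations of DELAY(1000) bounds the poll at roughly one second) */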
1408 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1409 error = 0;
1410 } else {
1411 /* XXX the slot is now marked permanently busy */
1412 error = EIO;
1413 device_printf(sc->amr_dev, "polled command timeout\n");
1414 }
1415 return(error);
1416}
1417
1418static void
1419amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1420{
1421 struct amr_command *ac = arg;
1422 struct amr_softc *sc = ac->ac_sc;
1423 int flags;
1412 int mb_channel;
1424
1413
1425 flags = 0;
1426 if (ac->ac_flags & AMR_CMD_DATAIN)
1427 flags |= BUS_DMASYNC_PREREAD;
1428 if (ac->ac_flags & AMR_CMD_DATAOUT)
1429 flags |= BUS_DMASYNC_PREWRITE;
1414 amr_setup_sg(arg, segs, nsegs, err);
1430
1415
1416 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1417 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1418 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1419 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1420 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1421 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1422
1423 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1424 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1431 if (AC_IS_SG64(ac)) {
1425 if (AC_IS_SG64(ac)) {
1432 amr_setup_dma64map(arg, segs, nsegs, err);
1433 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1434 } else {
1435 amr_setup_dmamap(arg, segs, nsegs, err);
1436 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1426 ac->ac_sg64_hi = 0;
1427 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1437 }
1428 }
1429
1438 sc->amr_poll_command1(sc, ac);
1439}
1440
1441/********************************************************************************
1442 * Take a command, submit it to the controller and busy-wait for it to return.
1443 * Returns nonzero on error. Can be safely called with interrupts enabled.
1444 */
1445static int
1446amr_quartz_poll_command(struct amr_command *ac)
1447{
1430 sc->amr_poll_command1(sc, ac);
1431}
1432
1433/********************************************************************************
1434 * Take a command, submit it to the controller and busy-wait for it to return.
1435 * Returns nonzero on error. Can be safely called with interrupts enabled.
1436 */
1437static int
1438amr_quartz_poll_command(struct amr_command *ac)
1439{
1448 bus_dma_tag_t tag;
1449 bus_dmamap_t datamap;
1450 struct amr_softc *sc = ac->ac_sc;
1451 int error;
1452
1453 debug_called(2);
1454
1455 error = 0;
1456
1457 if (AC_IS_SG64(ac)) {
1440 struct amr_softc *sc = ac->ac_sc;
1441 int error;
1442
1443 debug_called(2);
1444
1445 error = 0;
1446
1447 if (AC_IS_SG64(ac)) {
1458 tag = sc->amr_buffer64_dmat;
1459 datamap = ac->ac_dma64map;
1448 ac->ac_tag = sc->amr_buffer64_dmat;
1449 ac->ac_datamap = ac->ac_dma64map;
1460 } else {
1450 } else {
1461 tag = sc->amr_buffer_dmat;
1462 datamap = ac->ac_dmamap;
1451 ac->ac_tag = sc->amr_buffer_dmat;
1452 ac->ac_datamap = ac->ac_dmamap;
1463 }
1464
1465 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1466 if (ac->ac_data != 0) {
1453 }
1454
1455 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1456 if (ac->ac_data != 0) {
1467 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1468 amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1457 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1458 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1469 error = 1;
1470 }
1471 } else {
1472 error = amr_quartz_poll_command1(sc, ac);
1473 }
1474
1475 return (error);
1476}
1477
1478static int
1479amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1480{
1481 int count, error;
1482
1483 mtx_lock(&sc->amr_hw_lock);
1484 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1485 count=0;
1486 while (sc->amr_busyslots) {
1487 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1488 if(count++>10) {
1489 break;
1490 }
1491 }
1492
1493 if(sc->amr_busyslots) {
1494 device_printf(sc->amr_dev, "adapter is busy\n");
1495 mtx_unlock(&sc->amr_hw_lock);
1496 if (ac->ac_data != NULL) {
1459 error = 1;
1460 }
1461 } else {
1462 error = amr_quartz_poll_command1(sc, ac);
1463 }
1464
1465 return (error);
1466}
1467
1468static int
1469amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1470{
1471 int count, error;
1472
1473 mtx_lock(&sc->amr_hw_lock);
1474 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1475 count=0;
1476 while (sc->amr_busyslots) {
1477 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1478 if(count++>10) {
1479 break;
1480 }
1481 }
1482
1483 if(sc->amr_busyslots) {
1484 device_printf(sc->amr_dev, "adapter is busy\n");
1485 mtx_unlock(&sc->amr_hw_lock);
1486 if (ac->ac_data != NULL) {
1497 if (AC_IS_SG64(ac))
1498 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1499 else
1500 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1487 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1501 }
1502 ac->ac_status=0;
1503 return(1);
1504 }
1505 }
1506
1507 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1508
1509 /* clear the poll/ack fields in the mailbox */
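    /*
     * mb_nstatus and mb_status are primed with 0xFF sentinels so that the
     * busy-wait loops below can tell when the firmware has posted a real
     * status; mb_poll/mb_ack implement the 0x77 poll/ack handshake that
     * follows the status read.
     */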
1510 sc->amr_mailbox->mb_ident = 0xFE;
1511 sc->amr_mailbox->mb_nstatus = 0xFF;
1512 sc->amr_mailbox->mb_status = 0xFF;
1513 sc->amr_mailbox->mb_poll = 0;
1514 sc->amr_mailbox->mb_ack = 0;
1515 sc->amr_mailbox->mb_busy = 1;
1516
1517 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1518
1519 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1520 DELAY(1);
1521 while(sc->amr_mailbox->mb_status == 0xFF)
1522 DELAY(1);
1523 ac->ac_status=sc->amr_mailbox->mb_status;
1524 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1525 while(sc->amr_mailbox->mb_poll != 0x77)
1526 DELAY(1);
1527 sc->amr_mailbox->mb_poll = 0;
1528 sc->amr_mailbox->mb_ack = 0x77;
1529
1530 /* acknowledge that we have the commands */
1531 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1532 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1533 DELAY(1);
1534 mtx_unlock(&sc->amr_hw_lock);
1535
1536 /* unmap the command's data buffer */
1537 if (ac->ac_flags & AMR_CMD_DATAIN) {
1488 }
1489 ac->ac_status=0;
1490 return(1);
1491 }
1492 }
1493
1494 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1495
1496 /* clear the poll/ack fields in the mailbox */
1497 sc->amr_mailbox->mb_ident = 0xFE;
1498 sc->amr_mailbox->mb_nstatus = 0xFF;
1499 sc->amr_mailbox->mb_status = 0xFF;
1500 sc->amr_mailbox->mb_poll = 0;
1501 sc->amr_mailbox->mb_ack = 0;
1502 sc->amr_mailbox->mb_busy = 1;
1503
1504 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1505
1506 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1507 DELAY(1);
1508 while(sc->amr_mailbox->mb_status == 0xFF)
1509 DELAY(1);
1510 ac->ac_status=sc->amr_mailbox->mb_status;
1511 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1512 while(sc->amr_mailbox->mb_poll != 0x77)
1513 DELAY(1);
1514 sc->amr_mailbox->mb_poll = 0;
1515 sc->amr_mailbox->mb_ack = 0x77;
1516
1517 /* acknowledge that we have the commands */
1518 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1519 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1520 DELAY(1);
1521 mtx_unlock(&sc->amr_hw_lock);
1522
1523 /* unmap the command's data buffer */
1524 if (ac->ac_flags & AMR_CMD_DATAIN) {
1538 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1539 BUS_DMASYNC_POSTREAD);
1525 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1540 }
1541 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1526 }
1527 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1542 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1543 BUS_DMASYNC_POSTWRITE);
1528 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1544 }
1529 }
1545 if (AC_IS_SG64(ac))
1546 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1547 else
1548 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1530 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1549
1550 return(error);
1551}
1552
1553static __inline int
1554amr_freeslot(struct amr_command *ac)
1555{
1556 struct amr_softc *sc = ac->ac_sc;
1557 int slot;
1558
1559 debug_called(3);
1560
1561 slot = ac->ac_slot;
1562 if (sc->amr_busycmd[slot] == NULL)
1563 panic("amr: slot %d not busy?\n", slot);
1564
1565 sc->amr_busycmd[slot] = NULL;
1566 atomic_subtract_int(&sc->amr_busyslots, 1);
1567
1568 return (0);
1569}
1570
1571/********************************************************************************
1572 * Map/unmap (ac)'s data in the controller's addressable space as required.
1573 *
1574 * These functions may be safely called multiple times on a given command.
1575 */
1576static void
1531
1532 return(error);
1533}
1534
1535static __inline int
1536amr_freeslot(struct amr_command *ac)
1537{
1538 struct amr_softc *sc = ac->ac_sc;
1539 int slot;
1540
1541 debug_called(3);
1542
1543 slot = ac->ac_slot;
1544 if (sc->amr_busycmd[slot] == NULL)
1545 panic("amr: slot %d not busy?\n", slot);
1546
1547 sc->amr_busycmd[slot] = NULL;
1548 atomic_subtract_int(&sc->amr_busyslots, 1);
1549
1550 return (0);
1551}
1552
1553/********************************************************************************
1554 * Map/unmap (ac)'s data in the controller's addressable space as required.
1555 *
1556 * These functions may be safely called multiple times on a given command.
1557 */
1558static void
1577amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1559amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1578{
1579 struct amr_command *ac = (struct amr_command *)arg;
1580 struct amr_sgentry *sg;
1560{
1561 struct amr_command *ac = (struct amr_command *)arg;
1562 struct amr_sgentry *sg;
1581 int i;
1582 u_int8_t *sgc;
1563 struct amr_sg64entry *sg64;
1564 int flags, i;
1583
1584 debug_called(3);
1585
1565
1566 debug_called(3);
1567
1586 /* get base address of s/g table */
1587 sg = ac->ac_sg.sg32;
1568 if (error)
1569 printf("amr_setup_sg: error %d\n", error);
1588
1570
1589 /* save data physical address */
1590
1591 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1592 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1593 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1594 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1595 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1596 } else {
1597 sgc = &ac->ac_mailbox.mb_nsgelem;
1598 }
1599
1600 /* decide whether we need to populate the s/g table */
1601 if (nsegments < 2) {
1602 *sgc = 0;
1603 ac->ac_mailbox.mb_nsgelem = 0;
1604 ac->ac_mailbox.mb_physaddr = segs[0].ds_addr;
1605 } else {
1606 ac->ac_mailbox.mb_nsgelem = nsegments;
1607 *sgc = nsegments;
1608 /* XXX Setting these to 0 might not be needed. */
1609 ac->ac_sg64_lo = 0;
1610 ac->ac_sg64_hi = 0;
1611 ac->ac_mailbox.mb_physaddr = ac->ac_sgbusaddr;
1612 for (i = 0; i < nsegments; i++, sg++) {
1613 sg->sg_addr = segs[i].ds_addr;
1614 sg->sg_count = segs[i].ds_len;
1615 }
1616 }
1617
1618}
1619
1620static void
1621amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1622{
1623 struct amr_command *ac = (struct amr_command *)arg;
1624 struct amr_sg64entry *sg;
1625 int i;
1626 u_int8_t *sgc;
1627
1628 debug_called(3);
1629
1630 /* get base address of s/g table */
1571 /* get base address of s/g table */
1631 sg = ac->ac_sg.sg64;
1632
1633 /* save data physical address */
1634
1635 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1636 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1637 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1638 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1639 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1640 } else {
1641 sgc = &ac->ac_mailbox.mb_nsgelem;
1642 }
1643
1644 ac->ac_mailbox.mb_nsgelem = nsegments;
1645 *sgc = nsegments;
1646 ac->ac_sg64_hi = 0;
1647 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1648 ac->ac_mailbox.mb_physaddr = 0xffffffff;
1649 for (i = 0; i < nsegments; i++, sg++) {
1650 sg->sg_addr = segs[i].ds_addr;
1651 sg->sg_count = segs[i].ds_len;
1652 }
1653}
1654
1655static void
1656amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1657{
1658 struct amr_command *ac = (struct amr_command *)arg;
1659 struct amr_softc *sc = ac->ac_sc;
1660 struct amr_sgentry *sg;
1661 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1662 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1663 int i;
1664
1665 /* get base address of s/g table */
1666 sg = ac->ac_sg.sg32;
1572 sg = ac->ac_sg.sg32;
1573 sg64 = ac->ac_sg.sg64;
1667
1574
1668 /* decide whether we need to populate the s/g table */
1669 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1670 if (nsegments < 2) {
1671 aep->ap_no_sg_elements = 0;
1672 aep->ap_data_transfer_address = segs[0].ds_addr;
1673 } else {
1674 /* save s/g table information in passthrough */
1675 aep->ap_no_sg_elements = nsegments;
1676 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1677 /*
1678 * populate s/g table (overwrites previous call which mapped the
1679 * passthrough)
1680 */
1681 for (i = 0; i < nsegments; i++, sg++) {
1682 sg->sg_addr = segs[i].ds_addr;
1683 sg->sg_count = segs[i].ds_len;
1684 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1685 }
1575 if (AC_IS_SG64(ac)) {
1576 ac->ac_nsegments = nsegments;
1577 ac->ac_mb_physaddr = 0xffffffff;
1578 for (i = 0; i < nsegments; i++, sg64++) {
1579 sg64->sg_addr = segs[i].ds_addr;
1580 sg64->sg_count = segs[i].ds_len;
1686 }
1581 }
1687 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1688 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
1689 } else {
1582 } else {
1583 /* decide whether we need to populate the s/g table */
1690 if (nsegments < 2) {
1584 if (nsegments < 2) {
1691 ap->ap_no_sg_elements = 0;
1692 ap->ap_data_transfer_address = segs[0].ds_addr;
1585 ac->ac_nsegments = 0;
1586 ac->ac_mb_physaddr = segs[0].ds_addr;
1693 } else {
1587 } else {
1694 /* save s/g table information in passthrough */
1695 ap->ap_no_sg_elements = nsegments;
1696 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1697 /*
1698 * populate s/g table (overwrites previous call which mapped the
1699 * passthrough)
1700 */
1588 ac->ac_nsegments = nsegments;
1589 ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1701 for (i = 0; i < nsegments; i++, sg++) {
1702 sg->sg_addr = segs[i].ds_addr;
1703 sg->sg_count = segs[i].ds_len;
1590 for (i = 0; i < nsegments; i++, sg++) {
1591 sg->sg_addr = segs[i].ds_addr;
1592 sg->sg_count = segs[i].ds_len;
1704 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1705 }
1706 }
1593 }
1594 }
1707 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1708 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1709 }
1595 }
1710 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1711 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1712 BUS_DMASYNC_PREREAD);
1713 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1714 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1715 BUS_DMASYNC_PREWRITE);
1716 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1717 panic("no direction for ccb?\n");
1718
1596
1597 flags = 0;
1719 if (ac->ac_flags & AMR_CMD_DATAIN)
1598 if (ac->ac_flags & AMR_CMD_DATAIN)
1720 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1599 flags |= BUS_DMASYNC_PREREAD;
1721 if (ac->ac_flags & AMR_CMD_DATAOUT)
1600 if (ac->ac_flags & AMR_CMD_DATAOUT)
1722 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1723
1601 flags |= BUS_DMASYNC_PREWRITE;
1602 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1724 ac->ac_flags |= AMR_CMD_MAPPED;
1603 ac->ac_flags |= AMR_CMD_MAPPED;
1725
1726 if (sc->amr_submit_command(ac) == EBUSY) {
1727 amr_freeslot(ac);
1728 amr_requeue_ready(ac);
1729 }
1730}
1731
1732static void
1604}
1605
1606static void
1733amr_setup_ccb64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1607amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1734{
1608{
1735 struct amr_command *ac = (struct amr_command *)arg;
1736 struct amr_softc *sc = ac->ac_sc;
1737 struct amr_sg64entry *sg;
1738 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1739 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1740 int i;
1609 struct amr_command *ac = arg;
1610 struct amr_softc *sc = ac->ac_sc;
1611 int mb_channel;
1741
1612
1742 /* get base address of s/g table */
1743 sg = ac->ac_sg.sg64;
1613 amr_setup_sg(arg, segs, nsegs, err);
1744
1614
1745 /* decide whether we need to populate the s/g table */
1746 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1747 /* save s/g table information in passthrough */
1748 aep->ap_no_sg_elements = nsegments;
1749 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1750 /*
1751 * populate s/g table (overwrites previous call which mapped the
1752 * passthrough)
1753 */
1754 for (i = 0; i < nsegments; i++, sg++) {
1755 sg->sg_addr = segs[i].ds_addr;
1756 sg->sg_count = segs[i].ds_len;
1757 debug(3, " %d: 0x%lx/%d", i, (u_long)sg->sg_addr, sg->sg_count);
1758 }
1759 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1760 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
1761 } else {
1762 /* save s/g table information in passthrough */
1763 ap->ap_no_sg_elements = nsegments;
1764 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1765 /*
1766 * populate s/g table (overwrites previous call which mapped the
1767 * passthrough)
1768 */
1769 for (i = 0; i < nsegments; i++, sg++) {
1770 sg->sg_addr = segs[i].ds_addr;
1771 sg->sg_count = segs[i].ds_len;
1772 debug(3, " %d: 0x%lx/%d", i, (u_long)sg->sg_addr, sg->sg_count);
1773 }
1774 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1775 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1615 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1616 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1617 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1618 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1619 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1620 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1621
1622 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1623 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1624 if (AC_IS_SG64(ac)) {
1625 ac->ac_sg64_hi = 0;
1626 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1776 }
1627 }
1777 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1778 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1779 BUS_DMASYNC_PREREAD);
1780 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1781 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1782 BUS_DMASYNC_PREWRITE);
1783 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1784 panic("no direction for ccb?\n");
1785
1628
1786 if (ac->ac_flags & AMR_CMD_DATAIN)
1787 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1788 BUS_DMASYNC_PREREAD);
1789 if (ac->ac_flags & AMR_CMD_DATAOUT)
1790 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1791 BUS_DMASYNC_PREWRITE);
1792
1793 ac->ac_flags |= AMR_CMD_MAPPED;
1794
1795 if (sc->amr_submit_command(ac) == EBUSY) {
1796 amr_freeslot(ac);
1797 amr_requeue_ready(ac);
1798 }
1799}
1629 if (sc->amr_submit_command(ac) == EBUSY) {
1630 amr_freeslot(ac);
1631 amr_requeue_ready(ac);
1632 }
1633}
1800
1634
1801static void
1635static void
1802amr_setup_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegments,
1803 int error)
1636amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1804{
1637{
1805 struct amr_command *ac = (struct amr_command *)arg;
1806 struct amr_softc *sc = ac->ac_sc;
1638 struct amr_command *ac = arg;
1639 struct amr_softc *sc = ac->ac_sc;
1640 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1641 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1807
1642
1808 amr_setup_dmamap(arg, segs, nsegments, error);
1643 /* Set up the mailbox portion of the command to point at the ccb */
1644 ac->ac_mailbox.mb_nsgelem = 0;
1645 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1809
1646
1810 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1811 ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccbmap, ac,
1812 0) == EINPROGRESS) {
1813 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1647 amr_setup_sg(arg, segs, nsegs, err);
1648
1649 switch (ac->ac_mailbox.mb_command) {
1650 case AMR_CMD_EXTPASS:
1651 aep->ap_no_sg_elements = ac->ac_nsegments;
1652 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1653 break;
1654 case AMR_CMD_PASS:
1655 ap->ap_no_sg_elements = ac->ac_nsegments;
1656 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1657 break;
1658 default:
1659 panic("Unknown ccb command");
1814 }
1660 }
1815}
1816
1661
1817static void
1818amr_setup_dma64map_cb(void *arg, bus_dma_segment_t *segs, int nsegments,
1819 int error)
1820{
1821 struct amr_command *ac = (struct amr_command *)arg;
1822 struct amr_softc *sc = ac->ac_sc;
1823
1824 amr_setup_dma64map(arg, segs, nsegments, error);
1825
1826 if (bus_dmamap_load(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1827 ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccb64map, ac,
1828 0) == EINPROGRESS) {
1829 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1662 if (sc->amr_submit_command(ac) == EBUSY) {
1663 amr_freeslot(ac);
1664 amr_requeue_ready(ac);
1830 }
1831}
1832
1833static int
1834amr_mapcmd(struct amr_command *ac)
1835{
1665 }
1666}
1667
1668static int
1669amr_mapcmd(struct amr_command *ac)
1670{
1836 bus_dma_tag_t tag;
1837 bus_dmamap_t datamap;
1838 bus_dmamap_callback_t *cb;
1839 struct amr_softc *sc = ac->ac_sc;
1840
1841 debug_called(3);
1842
1843 if (AC_IS_SG64(ac)) {
1671 bus_dmamap_callback_t *cb;
1672 struct amr_softc *sc = ac->ac_sc;
1673
1674 debug_called(3);
1675
1676 if (AC_IS_SG64(ac)) {
1844 tag = sc->amr_buffer64_dmat;
1845 datamap = ac->ac_dma64map;
1846 cb = amr_setup_dma64map_cb;
1677 ac->ac_tag = sc->amr_buffer64_dmat;
1678 ac->ac_datamap = ac->ac_dma64map;
1847 } else {
1679 } else {
1848 tag = sc->amr_buffer_dmat;
1849 datamap = ac->ac_dmamap;
1850 cb = amr_setup_dmamap_cb;
1680 ac->ac_tag = sc->amr_buffer_dmat;
1681 ac->ac_datamap = ac->ac_dmamap;
1851 }
1852
1682 }
1683
1684 if (ac->ac_flags & AMR_CMD_CCB)
1685 cb = amr_setup_ccb;
1686 else
1687 cb = amr_setup_data;
1688
1853 /* if the command involves data at all, and hasn't been mapped */
1854 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1689 /* if the command involves data at all, and hasn't been mapped */
1690 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1855 if (ac->ac_ccb_data == NULL)
1856 cb = amr_setup_data_dmamap;
1857 /* map the data buffers into bus space and build the s/g list */
1691 /* map the data buffers into bus space and build the s/g list */
1858 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1859 cb, ac, 0) == EINPROGRESS) {
1692 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1693 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1860 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1861 }
1862 } else {
1863 if (sc->amr_submit_command(ac) == EBUSY) {
1864 amr_freeslot(ac);
1865 amr_requeue_ready(ac);
1866 }
1867 }
1868
1869 return (0);
1870}
1871
1872static void
1873amr_unmapcmd(struct amr_command *ac)
1874{
1694 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1695 }
1696 } else {
1697 if (sc->amr_submit_command(ac) == EBUSY) {
1698 amr_freeslot(ac);
1699 amr_requeue_ready(ac);
1700 }
1701 }
1702
1703 return (0);
1704}
1705
1706static void
1707amr_unmapcmd(struct amr_command *ac)
1708{
1875 struct amr_softc *sc = ac->ac_sc;
1876 int flag;
1877
1878 debug_called(3);
1879
1880 /* if the command involved data at all and was mapped */
1881 if (ac->ac_flags & AMR_CMD_MAPPED) {
1882
1883 if (ac->ac_data != NULL) {
1884
1885 flag = 0;
1886 if (ac->ac_flags & AMR_CMD_DATAIN)
1887 flag |= BUS_DMASYNC_POSTREAD;
1888 if (ac->ac_flags & AMR_CMD_DATAOUT)
1889 flag |= BUS_DMASYNC_POSTWRITE;
1890
1709 int flag;
1710
1711 debug_called(3);
1712
1713 /* if the command involved data at all and was mapped */
1714 if (ac->ac_flags & AMR_CMD_MAPPED) {
1715
1716 if (ac->ac_data != NULL) {
1717
1718 flag = 0;
1719 if (ac->ac_flags & AMR_CMD_DATAIN)
1720 flag |= BUS_DMASYNC_POSTREAD;
1721 if (ac->ac_flags & AMR_CMD_DATAOUT)
1722 flag |= BUS_DMASYNC_POSTWRITE;
1723
1891 if (AC_IS_SG64(ac)) {
1892 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map, flag);
1893 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1894 } else {
1895 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, flag);
1896 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1897 }
1724 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1725 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1898 }
1899
1726 }
1727
1900 if (ac->ac_ccb_data != NULL) {
1901
1902 flag = 0;
1903 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1904 flag |= BUS_DMASYNC_POSTREAD;
1905 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1906 flag |= BUS_DMASYNC_POSTWRITE;
1907
1908 if (AC_IS_SG64(ac)) {
1909 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_ccb_dma64map,flag);
1910 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map);
1911 } else {
1912 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, flag);
1913 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap);
1914 }
1915 }
1916 ac->ac_flags &= ~AMR_CMD_MAPPED;
1917 }
1918}
1919
1728 ac->ac_flags &= ~AMR_CMD_MAPPED;
1729 }
1730}
1731
1920static void
1921amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1922{
1923 struct amr_command *ac = arg;
1924 struct amr_softc *sc = ac->ac_sc;
1925 int flags;
1926
1927 flags = 0;
1928 if (ac->ac_flags & AMR_CMD_DATAIN)
1929 flags |= BUS_DMASYNC_PREREAD;
1930 if (ac->ac_flags & AMR_CMD_DATAOUT)
1931 flags |= BUS_DMASYNC_PREWRITE;
1932
1933 if (AC_IS_SG64(ac)) {
1934 amr_setup_dma64map(arg, segs, nsegs, err);
1935 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1936 } else {
1937 amr_setup_dmamap(arg, segs, nsegs, err);
1938 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1939 }
1940 ac->ac_flags |= AMR_CMD_MAPPED;
1941
1942 if (sc->amr_submit_command(ac) == EBUSY) {
1943 amr_freeslot(ac);
1944 amr_requeue_ready(ac);
1945 }
1946}
1947
1948/********************************************************************************
1949 * Take a command and give it to the controller; returns 0 if successful, or
1950 * EBUSY if the command should be retried later.
1951 */
1952static int
1953amr_start(struct amr_command *ac)
1954{
1955 struct amr_softc *sc;
1956 int error = 0;
1957 int slot;
1958
1959 debug_called(3);
1960
1961 /* mark command as busy so that polling consumer can tell */
1962 sc = ac->ac_sc;
1963 ac->ac_flags |= AMR_CMD_BUSY;
1964
1965 /* get a command slot (freed in amr_done) */
1966 slot = ac->ac_slot;
1967 if (sc->amr_busycmd[slot] != NULL)
1968 panic("amr: slot %d busy?\n", slot);
1969 sc->amr_busycmd[slot] = ac;
1970 atomic_add_int(&sc->amr_busyslots, 1);
1971
1972 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1973 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1974 /*
1975	 * Memory resources are short, so free the slot and let this be tried
1976 * later.
1977 */
1978 amr_freeslot(ac);
1979 }
1980
1981 return (error);
1982}
1983
1984/********************************************************************************
1985 * Extract one or more completed commands from the controller (sc)
1986 *
1987 * Returns nonzero if any commands on the work queue were marked as completed.
1988 */
1989
1990int
1991amr_done(struct amr_softc *sc)
1992{
1993 struct amr_command *ac;
1994 struct amr_mailbox mbox;
1995 int i, idx, result;
1996
1997 debug_called(3);
1998
1999 /* See if there's anything for us to do */
2000 result = 0;
2001
2002 /* loop collecting completed commands */
2003 for (;;) {
2004 /* poll for a completed command's identifier and status */
2005 if (sc->amr_get_work(sc, &mbox)) {
2006 result = 1;
2007
2008 /* iterate over completed commands in this result */
2009 for (i = 0; i < mbox.mb_nstatus; i++) {
2010 /* get pointer to busy command */
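		/* (completion identifiers are 1-based slot numbers, hence the -1) */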
2011 idx = mbox.mb_completed[i] - 1;
2012 ac = sc->amr_busycmd[idx];
2013
2014 /* really a busy command? */
2015 if (ac != NULL) {
2016
2017 /* pull the command from the busy index */
2018 amr_freeslot(ac);
2019
2020 /* save status for later use */
2021 ac->ac_status = mbox.mb_status;
2022 amr_enqueue_completed(ac);
2023 debug(3, "completed command with status %x", mbox.mb_status);
2024 } else {
2025 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
2026 }
2027 }
2028 } else
2029 break; /* no work */
2030 }
2031
2032 /* handle completion and timeouts */
2033 amr_complete(sc, 0);
2034
2035 return(result);
2036}
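/*
 * Note that amr_done() is driven not only from the interrupt path but also
 * from the polling paths above (amr_periodic() and the poll-command
 * routines), which call it to compensate for missed interrupts.
 */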
2037
2038/********************************************************************************
2039 * Do completion processing on completed commands from (sc)
2040 */
2041
2042static void
2043amr_complete(void *context, int pending)
2044{
2045 struct amr_softc *sc = (struct amr_softc *)context;
2046 struct amr_command *ac;
2047
2048 debug_called(3);
2049
2050 /* pull completed commands off the queue */
2051 for (;;) {
2052 ac = amr_dequeue_completed(sc);
2053 if (ac == NULL)
2054 break;
2055
2056 /* unmap the command's data buffer */
2057 amr_unmapcmd(ac);
2058
2059 /*
2060 * Is there a completion handler?
2061 */
2062 if (ac->ac_complete != NULL) {
2063 /* unbusy the command */
2064 ac->ac_flags &= ~AMR_CMD_BUSY;
2065 ac->ac_complete(ac);
2066
2067 /*
2068 * Is someone sleeping on this one?
2069 */
2070 } else {
2071 mtx_lock(&sc->amr_list_lock);
2072 ac->ac_flags &= ~AMR_CMD_BUSY;
2073 if (ac->ac_flags & AMR_CMD_SLEEP) {
2074 /* wake up the sleeping consumer */
2075 wakeup(ac);
2076 }
2077 mtx_unlock(&sc->amr_list_lock);
2078 }
2079
2080 if(!sc->amr_busyslots) {
2081 wakeup(sc);
2082 }
2083 }
2084
2085 mtx_lock(&sc->amr_list_lock);
2086 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
2087 amr_startio(sc);
2088 mtx_unlock(&sc->amr_list_lock);
2089}
2090
2091/********************************************************************************
2092 ********************************************************************************
2093 Command Buffer Management
2094 ********************************************************************************
2095 ********************************************************************************/
2096
2097/********************************************************************************
2098 * Get a new command buffer.
2099 *
2100 * This may return NULL in low-memory cases.
2101 *
2102 * If possible, we recycle a command buffer that's been used before.
2103 */
2104struct amr_command *
2105amr_alloccmd(struct amr_softc *sc)
2106{
2107 struct amr_command *ac;
2108
2109 debug_called(3);
2110
2111 ac = amr_dequeue_free(sc);
2112 if (ac == NULL) {
2113 amr_alloccmd_cluster(sc);
2114 ac = amr_dequeue_free(sc);
2115 }
2116 if (ac == NULL) {
2117 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
2118 return(NULL);
2119 }
2120
2121 /* clear out significant fields */
2122 ac->ac_status = 0;
2123 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
2124 ac->ac_flags = 0;
2125 ac->ac_bio = NULL;
2126 ac->ac_data = NULL;
1732/********************************************************************************
1733 * Take a command and give it to the controller; returns 0 if successful, or
1734 * EBUSY if the command should be retried later.
1735 */
1736static int
1737amr_start(struct amr_command *ac)
1738{
1739 struct amr_softc *sc;
1740 int error = 0;
1741 int slot;
1742
1743 debug_called(3);
1744
1745 /* mark command as busy so that polling consumer can tell */
1746 sc = ac->ac_sc;
1747 ac->ac_flags |= AMR_CMD_BUSY;
1748
1749 /* get a command slot (freed in amr_done) */
1750 slot = ac->ac_slot;
1751 if (sc->amr_busycmd[slot] != NULL)
1752 panic("amr: slot %d busy?\n", slot);
1753 sc->amr_busycmd[slot] = ac;
1754 atomic_add_int(&sc->amr_busyslots, 1);
1755
1756 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1757 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1758 /*
1759 * Memory resources are short, so free the slot and let this be tried
1760 * later.
1761 */
1762 amr_freeslot(ac);
1763 }
1764
1765 return (error);
1766}
1767
1768/********************************************************************************
1769 * Extract one or more completed commands from the controller (sc)
1770 *
1771 * Returns nonzero if any commands on the work queue were marked as completed.
1772 */
1773
1774int
1775amr_done(struct amr_softc *sc)
1776{
1777 struct amr_command *ac;
1778 struct amr_mailbox mbox;
1779 int i, idx, result;
1780
1781 debug_called(3);
1782
1783 /* See if there's anything for us to do */
1784 result = 0;
1785
1786 /* loop collecting completed commands */
1787 for (;;) {
1788 /* poll for a completed command's identifier and status */
1789 if (sc->amr_get_work(sc, &mbox)) {
1790 result = 1;
1791
1792 /* iterate over completed commands in this result */
1793 for (i = 0; i < mbox.mb_nstatus; i++) {
1794 /* get pointer to busy command */
1795 idx = mbox.mb_completed[i] - 1;
1796 ac = sc->amr_busycmd[idx];
1797
1798 /* really a busy command? */
1799 if (ac != NULL) {
1800
1801 /* pull the command from the busy index */
1802 amr_freeslot(ac);
1803
1804 /* save status for later use */
1805 ac->ac_status = mbox.mb_status;
1806 amr_enqueue_completed(ac);
1807 debug(3, "completed command with status %x", mbox.mb_status);
1808 } else {
1809 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1810 }
1811 }
1812 } else
1813 break; /* no work */
1814 }
1815
1816 /* handle completion and timeouts */
1817 amr_complete(sc, 0);
1818
1819 return(result);
1820}
1821
1822/********************************************************************************
1823 * Do completion processing on done commands on (sc)
1824 */
1825
1826static void
1827amr_complete(void *context, int pending)
1828{
1829 struct amr_softc *sc = (struct amr_softc *)context;
1830 struct amr_command *ac;
1831
1832 debug_called(3);
1833
1834 /* pull completed commands off the queue */
1835 for (;;) {
1836 ac = amr_dequeue_completed(sc);
1837 if (ac == NULL)
1838 break;
1839
1840 /* unmap the command's data buffer */
1841 amr_unmapcmd(ac);
1842
1843 /*
1844 * Is there a completion handler?
1845 */
1846 if (ac->ac_complete != NULL) {
1847 /* unbusy the command */
1848 ac->ac_flags &= ~AMR_CMD_BUSY;
1849 ac->ac_complete(ac);
1850
1851 /*
1852 * Is someone sleeping on this one?
1853 */
1854 } else {
1855 mtx_lock(&sc->amr_list_lock);
1856 ac->ac_flags &= ~AMR_CMD_BUSY;
1857 if (ac->ac_flags & AMR_CMD_SLEEP) {
1858 /* wake up the sleeping consumer */
1859 wakeup(ac);
1860 }
1861 mtx_unlock(&sc->amr_list_lock);
1862 }
1863
1864 if(!sc->amr_busyslots) {
1865 wakeup(sc);
1866 }
1867 }
1868
1869 mtx_lock(&sc->amr_list_lock);
1870 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1871 amr_startio(sc);
1872 mtx_unlock(&sc->amr_list_lock);
1873}
1874
1875/********************************************************************************
1876 ********************************************************************************
1877 Command Buffer Management
1878 ********************************************************************************
1879 ********************************************************************************/
1880
1881/********************************************************************************
1882 * Get a new command buffer.
1883 *
1884 * This may return NULL in low-memory cases.
1885 *
1886 * If possible, we recycle a command buffer that's been used before.
1887 */
1888struct amr_command *
1889amr_alloccmd(struct amr_softc *sc)
1890{
1891 struct amr_command *ac;
1892
1893 debug_called(3);
1894
1895 ac = amr_dequeue_free(sc);
1896 if (ac == NULL) {
1897 amr_alloccmd_cluster(sc);
1898 ac = amr_dequeue_free(sc);
1899 }
1900 if (ac == NULL) {
1901 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1902 return(NULL);
1903 }
1904
1905 /* clear out significant fields */
1906 ac->ac_status = 0;
1907 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1908 ac->ac_flags = 0;
1909 ac->ac_bio = NULL;
1910 ac->ac_data = NULL;
2127 ac->ac_ccb_data = NULL;
2128 ac->ac_complete = NULL;
1911 ac->ac_complete = NULL;
1912 ac->ac_tag = NULL;
1913 ac->ac_datamap = NULL;
2129 return(ac);
2130}
2131
2132/********************************************************************************
2133 * Release a command buffer for recycling.
2134 */
2135void
2136amr_releasecmd(struct amr_command *ac)
2137{
2138 debug_called(3);
2139
2140 amr_enqueue_free(ac);
2141}
2142
2143/********************************************************************************
2144 * Allocate a new command cluster and initialise it.
2145 */
2146static void
2147amr_alloccmd_cluster(struct amr_softc *sc)
2148{
2149 struct amr_command_cluster *acc;
2150 struct amr_command *ac;
2151 int i, nextslot;
2152
2153 if (sc->amr_nextslot > sc->amr_maxio)
2154 return;
2155 acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
2156 if (acc != NULL) {
2157 nextslot = sc->amr_nextslot;
2158 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2159 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2160 ac = &acc->acc_command[i];
2161 ac->ac_sc = sc;
2162 ac->ac_slot = nextslot;
2163
2164 /*
2165 * The SG table for each slot is a fixed size and is assumed to
2166 * hold 64-bit s/g objects when the driver is configured to do
2167 * 64-bit DMA. 32-bit DMA commands still use the same table, but
2168 * cast down to 32-bit objects.
2169 */
2170 if (AMR_IS_SG64(sc)) {
2171 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2172 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2173 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2174 } else {
2175 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2176 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2177 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2178 }
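/*
 * In other words, each slot owns a fixed window of AMR_NSEG entries in the
 * shared S/G area, and slot N's window starts N * AMR_NSEG entries in,
 * whichever entry size (32- or 64-bit) applies.
 */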
2179
1914 return(ac);
1915}
1916
1917/********************************************************************************
1918 * Release a command buffer for recycling.
1919 */
1920void
1921amr_releasecmd(struct amr_command *ac)
1922{
1923 debug_called(3);
1924
1925 amr_enqueue_free(ac);
1926}
1927
1928/********************************************************************************
1929 * Allocate a new command cluster and initialise it.
1930 */
1931static void
1932amr_alloccmd_cluster(struct amr_softc *sc)
1933{
1934 struct amr_command_cluster *acc;
1935 struct amr_command *ac;
1936 int i, nextslot;
1937
1938 if (sc->amr_nextslot > sc->amr_maxio)
1939 return;
1940 acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1941 if (acc != NULL) {
1942 nextslot = sc->amr_nextslot;
1943 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1944 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1945 ac = &acc->acc_command[i];
1946 ac->ac_sc = sc;
1947 ac->ac_slot = nextslot;
1948
1949 /*
1950 * The SG table for each slot is a fixed size and is assumed to
1951 * hold 64-bit s/g objects when the driver is configured to do
1952 * 64-bit DMA. 32-bit DMA commands still use the same table, but
1953 * cast down to 32-bit objects.
1954 */
1955 if (AMR_IS_SG64(sc)) {
1956 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1957 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1958 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1959 } else {
1960 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1961 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1962 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1963 }
1964
2180 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) ||
2181 bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap) ||
2182 (AMR_IS_SG64(sc) &&
2183 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map) ||
2184 bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_ccb_dma64map))))
2185 break;
1965 ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1966 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
1967 (ac->ac_slot * sizeof(union amr_ccb));
1968
1969 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
1970 break;
1971 if (AMR_IS_SG64(sc) &&
1972 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
1973 break;
2186 amr_releasecmd(ac);
2187 if (++nextslot > sc->amr_maxio)
2188 break;
2189 }
2190 sc->amr_nextslot = nextslot;
2191 }
2192}
2193
2194/********************************************************************************
2195 * Free a command cluster
2196 */
2197static void
2198amr_freecmd_cluster(struct amr_command_cluster *acc)
2199{
2200 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2201 int i;
2202
2203 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2204 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
1974 amr_releasecmd(ac);
1975 if (++nextslot > sc->amr_maxio)
1976 break;
1977 }
1978 sc->amr_nextslot = nextslot;
1979 }
1980}
1981
1982/********************************************************************************
1983 * Free a command cluster
1984 */
1985static void
1986amr_freecmd_cluster(struct amr_command_cluster *acc)
1987{
1988 struct amr_softc *sc = acc->acc_command[0].ac_sc;
1989 int i;
1990
1991 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1992 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2205 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_ccb_dmamap);
2206 if (AMR_IS_SG64(sc))
2207 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
1993 if (AMR_IS_SG64(sc))
1994 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2208 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_ccb_dma64map);
2209 }
2210 free(acc, M_AMR);
2211}
2212
2213/********************************************************************************
2214 ********************************************************************************
2215 Interface-specific Shims
2216 ********************************************************************************
2217 ********************************************************************************/
2218
2219/********************************************************************************
2220 * Tell the controller that the mailbox contains a valid command
2221 */
2222static int
2223amr_quartz_submit_command(struct amr_command *ac)
2224{
2225 struct amr_softc *sc = ac->ac_sc;
2226 int i = 0;
2227
2228 mtx_lock(&sc->amr_hw_lock);
2229 while (sc->amr_mailbox->mb_busy && (i++ < 10))
2230 DELAY(1);
2231 if (sc->amr_mailbox->mb_busy) {
2232 mtx_unlock(&sc->amr_hw_lock);
2233 return (EBUSY);
2234 }
2235
2236 /*
2237 * Save the slot number so that we can locate this command when complete.
2238 * Note that ident = 0 seems to be special, so we don't use it.
2239 */
2240 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2241 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
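/*
 * Only the first 14 bytes are copied above: assuming the usual MegaRAID
 * mailbox layout, that is the command proper (command, ident, blkcount,
 * lba, physaddr, drive, nsgelem).  The busy/poll/ack handshake bytes that
 * follow are set explicitly below rather than copied.
 */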
2242 sc->amr_mailbox->mb_busy = 1;
2243 sc->amr_mailbox->mb_poll = 0;
2244 sc->amr_mailbox->mb_ack = 0;
2245 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2246 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2247
2248 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2249 mtx_unlock(&sc->amr_hw_lock);
2250 return(0);
2251}
2252
2253static int
2254amr_std_submit_command(struct amr_command *ac)
2255{
2256 struct amr_softc *sc = ac->ac_sc;
2257
2258 mtx_lock(&sc->amr_hw_lock);
2259 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2260 mtx_unlock(&sc->amr_hw_lock);
2261 return (EBUSY);
2262 }
2263
2264 /*
2265 * Save the slot number so that we can locate this command when complete.
2266 * Note that ident = 0 seems to be special, so we don't use it.
2267 */
2268 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2269 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2270 sc->amr_mailbox->mb_busy = 1;
2271 sc->amr_mailbox->mb_poll = 0;
2272 sc->amr_mailbox->mb_ack = 0;
2273
2274 AMR_SPOST_COMMAND(sc);
2275 mtx_unlock(&sc->amr_hw_lock);
2276 return(0);
2277}
2278
2279/********************************************************************************
2280 * Claim any work that the controller has completed; acknowledge completion,
2281 * save details of the completion in (mbsave)
2282 */
2283static int
2284amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2285{
2286 int worked, i;
2287 u_int32_t outd;
2288 u_int8_t nstatus;
2289 u_int8_t completed[46];
2290
2291 debug_called(3);
2292
2293 worked = 0;
2294
2295 /* work waiting for us? */
2296 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2297
2298 /* acknowledge interrupt */
2299 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2300
2301 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2302 DELAY(1);
2303 sc->amr_mailbox->mb_nstatus = 0xff;
2304
2305 /* wait until fw wrote out all completions */
2306 for (i = 0; i < nstatus; i++) {
2307 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2308 DELAY(1);
2309 sc->amr_mailbox->mb_completed[i] = 0xff;
2310 }
2311
2312 /* Save information for later processing */
2313 mbsave->mb_nstatus = nstatus;
2314 mbsave->mb_status = sc->amr_mailbox->mb_status;
2315 sc->amr_mailbox->mb_status = 0xff;
2316
2317 for (i = 0; i < nstatus; i++)
2318 mbsave->mb_completed[i] = completed[i];
2319
2320 /* acknowledge that we have the commands */
2321 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2322
2323#if 0
2324#ifndef AMR_QUARTZ_GOFASTER
2325 /*
2326 * This waits for the controller to notice that we've taken the
2327 * command from it. It's very inefficient, and we shouldn't do it,
2328 * but if we remove this code, we stop completing commands under
2329 * load.
2330 *
2331 * Peter J says we shouldn't do this. The documentation says we
2332 * should. Who is right?
2333 */
2334 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2335 ; /* XXX aiee! what if it dies? */
2336#endif
2337#endif
2338
2339 worked = 1; /* got some work */
2340 }
2341
2342 return(worked);
2343}
2344
2345static int
2346amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2347{
2348 int worked;
2349 u_int8_t istat;
2350
2351 debug_called(3);
2352
2353 worked = 0;
2354
2355 /* check for valid interrupt status */
2356 istat = AMR_SGET_ISTAT(sc);
2357 if ((istat & AMR_SINTR_VALID) != 0) {
2358 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2359
2360 /* save mailbox, which contains a list of completed commands */
2361 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2362
2363 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2364 worked = 1;
2365 }
2366
2367 return(worked);
2368}
2369
2370/********************************************************************************
2371 * Notify the controller of the mailbox location.
2372 */
2373static void
2374amr_std_attach_mailbox(struct amr_softc *sc)
2375{
2376
2377 /* program the mailbox physical address */
2378 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2379 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2380 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2381 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2382 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2383
2384 /* clear any outstanding interrupt and enable interrupts properly */
2385 AMR_SACK_INTERRUPT(sc);
2386 AMR_SENABLE_INTR(sc);
2387}
2388
2389#ifdef AMR_BOARD_INIT
2390/********************************************************************************
2391 * Initialise the controller
2392 */
2393static int
2394amr_quartz_init(struct amr_softc *sc)
2395{
2396 int status, ostatus;
2397
2398 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2399
2400 AMR_QRESET(sc);
2401
2402 ostatus = 0xff;
2403 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2404 if (status != ostatus) {
2405 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2406 ostatus = status;
2407 }
2408 switch (status) {
2409 case AMR_QINIT_NOMEM:
2410 return(ENOMEM);
2411
2412 case AMR_QINIT_SCAN:
2413 /* XXX we could print channel/target here */
2414 break;
2415 }
2416 }
2417 return(0);
2418}
2419
2420static int
2421amr_std_init(struct amr_softc *sc)
2422{
2423 int status, ostatus;
2424
2425 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2426
2427 AMR_SRESET(sc);
2428
2429 ostatus = 0xff;
2430 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2431 if (status != ostatus) {
2432 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2433 ostatus = status;
2434 }
2435 switch (status) {
2436 case AMR_SINIT_NOMEM:
2437 return(ENOMEM);
2438
2439 case AMR_SINIT_INPROG:
2440 /* XXX we could print channel/target here? */
2441 break;
2442 }
2443 }
2444 return(0);
2445}
2446#endif
2447
2448/********************************************************************************
2449 ********************************************************************************
2450 Debugging
2451 ********************************************************************************
2452 ********************************************************************************/
2453
2454/********************************************************************************
2455 * Identify the controller and print some information about it.
2456 */
2457static void
2458amr_describe_controller(struct amr_softc *sc)
2459{
2460 struct amr_prodinfo *ap;
2461 struct amr_enquiry *ae;
2462 char *prod;
2463 int status;
2464
2465 /*
2466 * Try to get 40LD product info, which tells us what the card is labelled as.
2467 */
2468 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2469 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2470 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2471 ap->ap_memsize);
2472
2473 free(ap, M_AMR);
2474 return;
2475 }
2476
2477 /*
2478 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2479 */
2480 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2481 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2482
2483 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2484
2485 /*
2486 * Try to work it out based on the PCI signatures.
2487 */
2488 switch (pci_get_device(sc->amr_dev)) {
2489 case 0x9010:
2490 prod = "Series 428";
2491 break;
2492 case 0x9060:
2493 prod = "Series 434";
2494 break;
2495 default:
2496 prod = "unknown controller";
2497 break;
2498 }
2499 } else {
2500 device_printf(sc->amr_dev, "<unsupported controller>\n");
2501 return;
2502 }
2503
2504 /*
2505 * HP NetRaid controllers have a special encoding of the firmware and
2506 * BIOS versions. The AMI version seems to have it as strings whereas
2507 * the HP version does it with a leading uppercase character and two
2508 * binary numbers.
2509 */
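/*
 * Illustration: an HP-style firmware field of { 0x02, 0x28, 'H' } (bytes
 * [0], [1], [2]) would be printed as "H.40.02" by the format string below.
 */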
2510
2511 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2512 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2513 ae->ae_adapter.aa_firmware[1] < ' ' &&
2514 ae->ae_adapter.aa_firmware[0] < ' ' &&
2515 ae->ae_adapter.aa_bios[2] >= 'A' &&
2516 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2517 ae->ae_adapter.aa_bios[1] < ' ' &&
2518 ae->ae_adapter.aa_bios[0] < ' ') {
2519
2520 /* this looks like we have an HP NetRaid version of the MegaRaid */
2521
2522 if(ae->ae_signature == AMR_SIG_438) {
2523 /* the AMI 438 is a NetRaid 3si in HP-land */
2524 prod = "HP NetRaid 3si";
2525 }
2526
2527 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2528 prod, ae->ae_adapter.aa_firmware[2],
2529 ae->ae_adapter.aa_firmware[1],
2530 ae->ae_adapter.aa_firmware[0],
2531 ae->ae_adapter.aa_bios[2],
2532 ae->ae_adapter.aa_bios[1],
2533 ae->ae_adapter.aa_bios[0],
2534 ae->ae_adapter.aa_memorysize);
2535 } else {
2536 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2537 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2538 ae->ae_adapter.aa_memorysize);
2539 }
2540 free(ae, M_AMR);
2541}
2542
2543int
2544amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2545{
2546 struct amr_command *ac;
2547 int error = EIO;
2548
2549 debug_called(1);
2550
2551 sc->amr_state |= AMR_STATE_INTEN;
2552
2553 /* get ourselves a command buffer */
2554 if ((ac = amr_alloccmd(sc)) == NULL)
2555 goto out;
2556 /* set command flags */
2557 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2558
2559 /* point the command at our data */
2560 ac->ac_data = data;
2561 ac->ac_length = blks * AMR_BLKSIZE;
2562
2563 /* build the command proper */
2564 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2565 ac->ac_mailbox.mb_blkcount = blks;
2566 ac->ac_mailbox.mb_lba = lba;
2567 ac->ac_mailbox.mb_drive = unit;
2568
2569 /* can't assume that interrupts are going to work here, so play it safe */
2570 if (sc->amr_poll_command(ac))
2571 goto out;
2572 error = ac->ac_status;
2573
2574 out:
2575 if (ac != NULL)
2576 amr_releasecmd(ac);
2577
2578 sc->amr_state &= ~AMR_STATE_INTEN;
2579 return (error);
2580}
2581
2582
2583
2584#ifdef AMR_DEBUG
2585/********************************************************************************
2586 * Print the command (ac) in human-readable format
2587 */
2588#if 0
2589static void
2590amr_printcommand(struct amr_command *ac)
2591{
2592 struct amr_softc *sc = ac->ac_sc;
2593 struct amr_sgentry *sg;
2594 int i;
2595
2596 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2597 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2598 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2599 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2600 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2601 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2602 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2603 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2604
2605 /* get base address of s/g table */
2606 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2607 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2608 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
2609}
2610#endif
2611#endif
1995 }
1996 free(acc, M_AMR);
1997}
1998
1999/********************************************************************************
2000 ********************************************************************************
2001 Interface-specific Shims
2002 ********************************************************************************
2003 ********************************************************************************/
2004
2005/********************************************************************************
2006 * Tell the controller that the mailbox contains a valid command
2007 */
2008static int
2009amr_quartz_submit_command(struct amr_command *ac)
2010{
2011 struct amr_softc *sc = ac->ac_sc;
2012 int i = 0;
2013
2014 mtx_lock(&sc->amr_hw_lock);
2015 while (sc->amr_mailbox->mb_busy && (i++ < 10))
2016 DELAY(1);
2017 if (sc->amr_mailbox->mb_busy) {
2018 mtx_unlock(&sc->amr_hw_lock);
2019 return (EBUSY);
2020 }
2021
2022 /*
2023 * Save the slot number so that we can locate this command when complete.
2024 * Note that ident = 0 seems to be special, so we don't use it.
2025 */
2026 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2027 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2028 sc->amr_mailbox->mb_busy = 1;
2029 sc->amr_mailbox->mb_poll = 0;
2030 sc->amr_mailbox->mb_ack = 0;
2031 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2032 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2033
2034 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2035 mtx_unlock(&sc->amr_hw_lock);
2036 return(0);
2037}
2038
2039static int
2040amr_std_submit_command(struct amr_command *ac)
2041{
2042 struct amr_softc *sc = ac->ac_sc;
2043
2044 mtx_lock(&sc->amr_hw_lock);
2045 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2046 mtx_unlock(&sc->amr_hw_lock);
2047 return (EBUSY);
2048 }
2049
2050 /*
2051 * Save the slot number so that we can locate this command when complete.
2052 * Note that ident = 0 seems to be special, so we don't use it.
2053 */
2054 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2055 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2056 sc->amr_mailbox->mb_busy = 1;
2057 sc->amr_mailbox->mb_poll = 0;
2058 sc->amr_mailbox->mb_ack = 0;
2059
2060 AMR_SPOST_COMMAND(sc);
2061 mtx_unlock(&sc->amr_hw_lock);
2062 return(0);
2063}
2064
2065/********************************************************************************
2066 * Claim any work that the controller has completed; acknowledge completion,
2067 * save details of the completion in (mbsave)
2068 */
2069static int
2070amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2071{
2072 int worked, i;
2073 u_int32_t outd;
2074 u_int8_t nstatus;
2075 u_int8_t completed[46];
2076
2077 debug_called(3);
2078
2079 worked = 0;
2080
2081 /* work waiting for us? */
2082 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2083
2084 /* acknowledge interrupt */
2085 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2086
2087 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2088 DELAY(1);
2089 sc->amr_mailbox->mb_nstatus = 0xff;
2090
2091 /* wait until fw wrote out all completions */
2092 for (i = 0; i < nstatus; i++) {
2093 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2094 DELAY(1);
2095 sc->amr_mailbox->mb_completed[i] = 0xff;
2096 }
2097
2098 /* Save information for later processing */
2099 mbsave->mb_nstatus = nstatus;
2100 mbsave->mb_status = sc->amr_mailbox->mb_status;
2101 sc->amr_mailbox->mb_status = 0xff;
2102
2103 for (i = 0; i < nstatus; i++)
2104 mbsave->mb_completed[i] = completed[i];
2105
2106 /* acknowledge that we have the commands */
2107 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2108
2109#if 0
2110#ifndef AMR_QUARTZ_GOFASTER
2111 /*
2112 * This waits for the controller to notice that we've taken the
2113 * command from it. It's very inefficient, and we shouldn't do it,
2114 * but if we remove this code, we stop completing commands under
2115 * load.
2116 *
2117 * Peter J says we shouldn't do this. The documentation says we
2118 * should. Who is right?
2119 */
2120 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2121 ; /* XXX aiee! what if it dies? */
2122#endif
2123#endif
2124
2125 worked = 1; /* got some work */
2126 }
2127
2128 return(worked);
2129}
2130
2131static int
2132amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2133{
2134 int worked;
2135 u_int8_t istat;
2136
2137 debug_called(3);
2138
2139 worked = 0;
2140
2141 /* check for valid interrupt status */
2142 istat = AMR_SGET_ISTAT(sc);
2143 if ((istat & AMR_SINTR_VALID) != 0) {
2144 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2145
2146 /* save mailbox, which contains a list of completed commands */
2147 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2148
2149 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2150 worked = 1;
2151 }
2152
2153 return(worked);
2154}
2155
2156/********************************************************************************
2157 * Notify the controller of the mailbox location.
2158 */
2159static void
2160amr_std_attach_mailbox(struct amr_softc *sc)
2161{
2162
2163 /* program the mailbox physical address */
2164 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2165 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2166 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2167 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2168 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2169
2170 /* clear any outstanding interrupt and enable interrupts properly */
2171 AMR_SACK_INTERRUPT(sc);
2172 AMR_SENABLE_INTR(sc);
2173}
2174
2175#ifdef AMR_BOARD_INIT
2176/********************************************************************************
2177 * Initialise the controller
2178 */
2179static int
2180amr_quartz_init(struct amr_softc *sc)
2181{
2182 int status, ostatus;
2183
2184 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2185
2186 AMR_QRESET(sc);
2187
2188 ostatus = 0xff;
2189 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2190 if (status != ostatus) {
2191 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2192 ostatus = status;
2193 }
2194 switch (status) {
2195 case AMR_QINIT_NOMEM:
2196 return(ENOMEM);
2197
2198 case AMR_QINIT_SCAN:
2199 /* XXX we could print channel/target here */
2200 break;
2201 }
2202 }
2203 return(0);
2204}
2205
2206static int
2207amr_std_init(struct amr_softc *sc)
2208{
2209 int status, ostatus;
2210
2211 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2212
2213 AMR_SRESET(sc);
2214
2215 ostatus = 0xff;
2216 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2217 if (status != ostatus) {
2218 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2219 ostatus = status;
2220 }
2221 switch (status) {
2222 case AMR_SINIT_NOMEM:
2223 return(ENOMEM);
2224
2225 case AMR_SINIT_INPROG:
2226 /* XXX we could print channel/target here? */
2227 break;
2228 }
2229 }
2230 return(0);
2231}
2232#endif
2233
2234/********************************************************************************
2235 ********************************************************************************
2236 Debugging
2237 ********************************************************************************
2238 ********************************************************************************/
2239
2240/********************************************************************************
2241 * Identify the controller and print some information about it.
2242 */
2243static void
2244amr_describe_controller(struct amr_softc *sc)
2245{
2246 struct amr_prodinfo *ap;
2247 struct amr_enquiry *ae;
2248 char *prod;
2249 int status;
2250
2251 /*
2252 * Try to get 40LD product info, which tells us what the card is labelled as.
2253 */
2254 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2255 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2256 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2257 ap->ap_memsize);
2258
2259 free(ap, M_AMR);
2260 return;
2261 }
2262
2263 /*
2264 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2265 */
2266 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2267 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2268
2269 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2270
2271 /*
2272 * Try to work it out based on the PCI signatures.
2273 */
2274 switch (pci_get_device(sc->amr_dev)) {
2275 case 0x9010:
2276 prod = "Series 428";
2277 break;
2278 case 0x9060:
2279 prod = "Series 434";
2280 break;
2281 default:
2282 prod = "unknown controller";
2283 break;
2284 }
2285 } else {
2286 device_printf(sc->amr_dev, "<unsupported controller>\n");
2287 return;
2288 }
2289
2290 /*
2291 * HP NetRaid controllers have a special encoding of the firmware and
2292 * BIOS versions. The AMI version seems to have it as strings whereas
2293 * the HP version does it with a leading uppercase character and two
2294 * binary numbers.
2295 */
2296
2297 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2298 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2299 ae->ae_adapter.aa_firmware[1] < ' ' &&
2300 ae->ae_adapter.aa_firmware[0] < ' ' &&
2301 ae->ae_adapter.aa_bios[2] >= 'A' &&
2302 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2303 ae->ae_adapter.aa_bios[1] < ' ' &&
2304 ae->ae_adapter.aa_bios[0] < ' ') {
2305
2306 /* this looks like we have an HP NetRaid version of the MegaRaid */
2307
2308 if(ae->ae_signature == AMR_SIG_438) {
2309 /* the AMI 438 is a NetRaid 3si in HP-land */
2310 prod = "HP NetRaid 3si";
2311 }
2312
2313 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2314 prod, ae->ae_adapter.aa_firmware[2],
2315 ae->ae_adapter.aa_firmware[1],
2316 ae->ae_adapter.aa_firmware[0],
2317 ae->ae_adapter.aa_bios[2],
2318 ae->ae_adapter.aa_bios[1],
2319 ae->ae_adapter.aa_bios[0],
2320 ae->ae_adapter.aa_memorysize);
2321 } else {
2322 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2323 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2324 ae->ae_adapter.aa_memorysize);
2325 }
2326 free(ae, M_AMR);
2327}
2328
2329int
2330amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2331{
2332 struct amr_command *ac;
2333 int error = EIO;
2334
2335 debug_called(1);
2336
2337 sc->amr_state |= AMR_STATE_INTEN;
2338
2339 /* get ourselves a command buffer */
2340 if ((ac = amr_alloccmd(sc)) == NULL)
2341 goto out;
2342 /* set command flags */
2343 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2344
2345 /* point the command at our data */
2346 ac->ac_data = data;
2347 ac->ac_length = blks * AMR_BLKSIZE;
2348
2349 /* build the command proper */
2350 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2351 ac->ac_mailbox.mb_blkcount = blks;
2352 ac->ac_mailbox.mb_lba = lba;
2353 ac->ac_mailbox.mb_drive = unit;
2354
2355 /* can't assume that interrupts are going to work here, so play it safe */
2356 if (sc->amr_poll_command(ac))
2357 goto out;
2358 error = ac->ac_status;
2359
2360 out:
2361 if (ac != NULL)
2362 amr_releasecmd(ac);
2363
2364 sc->amr_state &= ~AMR_STATE_INTEN;
2365 return (error);
2366}
2367
2368
2369
2370#ifdef AMR_DEBUG
2371/********************************************************************************
2372 * Print the command (ac) in human-readable format
2373 */
2374#if 0
2375static void
2376amr_printcommand(struct amr_command *ac)
2377{
2378 struct amr_softc *sc = ac->ac_sc;
2379 struct amr_sgentry *sg;
2380 int i;
2381
2382 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2383 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2384 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2385 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2386 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2387 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2388 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2389 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2390
2391 /* get base address of s/g table */
2392 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2393 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2394 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
2395}
2396#endif
2397#endif