/* mly.c revision 78235 */
1/*- 2 * Copyright (c) 2000, 2001 Michael Smith 3 * Copyright (c) 2000 BSDi 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 * 27 * $FreeBSD: head/sys/dev/mly/mly.c 78235 2001-06-15 00:16:59Z peter $ 28 */ 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/malloc.h> 33#include <sys/kernel.h> 34#include <sys/bus.h> 35#include <sys/conf.h> 36#include <sys/ctype.h> 37#include <sys/ioccom.h> 38#include <sys/stat.h> 39 40#include <machine/bus_memio.h> 41#include <machine/bus.h> 42#include <machine/resource.h> 43#include <sys/rman.h> 44 45#include <cam/scsi/scsi_all.h> 46 47#include <dev/mly/mlyreg.h> 48#include <dev/mly/mlyio.h> 49#include <dev/mly/mlyvar.h> 50#define MLY_DEFINE_TABLES 51#include <dev/mly/mly_tables.h> 52 53static int mly_get_controllerinfo(struct mly_softc *sc); 54static void mly_scan_devices(struct mly_softc *sc); 55static void mly_rescan_btl(struct mly_softc *sc, int bus, int target); 56static void mly_complete_rescan(struct mly_command *mc); 57static int mly_get_eventstatus(struct mly_softc *sc); 58static int mly_enable_mmbox(struct mly_softc *sc); 59static int mly_flush(struct mly_softc *sc); 60static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, 61 size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length); 62static void mly_fetch_event(struct mly_softc *sc); 63static void mly_complete_event(struct mly_command *mc); 64static void mly_process_event(struct mly_softc *sc, struct mly_event *me); 65static void mly_periodic(void *data); 66 67static int mly_immediate_command(struct mly_command *mc); 68static int mly_start(struct mly_command *mc); 69static void mly_complete(void *context, int pending); 70 71static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); 72static int mly_alloc_commands(struct mly_softc *sc); 73static void mly_map_command(struct mly_command *mc); 74static void mly_unmap_command(struct mly_command *mc); 75 76static int mly_fwhandshake(struct mly_softc *sc); 77 78static void mly_describe_controller(struct mly_softc *sc); 79#ifdef MLY_DEBUG 
80static void mly_printstate(struct mly_softc *sc); 81static void mly_print_command(struct mly_command *mc); 82static void mly_print_packet(struct mly_command *mc); 83static void mly_panic(struct mly_softc *sc, char *reason); 84#endif 85void mly_print_controller(int controller); 86 87static d_open_t mly_user_open; 88static d_close_t mly_user_close; 89static d_ioctl_t mly_user_ioctl; 90static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc); 91static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh); 92 93#define MLY_CDEV_MAJOR 158 94 95static struct cdevsw mly_cdevsw = { 96 mly_user_open, 97 mly_user_close, 98 noread, 99 nowrite, 100 mly_user_ioctl, 101 nopoll, 102 nommap, 103 nostrategy, 104 "mly", 105 MLY_CDEV_MAJOR, 106 nodump, 107 nopsize, 108 0 109}; 110 111/******************************************************************************** 112 ******************************************************************************** 113 Device Interface 114 ******************************************************************************** 115 ********************************************************************************/ 116 117/******************************************************************************** 118 * Initialise the controller and softc 119 */ 120int 121mly_attach(struct mly_softc *sc) 122{ 123 int error; 124 125 debug_called(1); 126 127 /* 128 * Initialise per-controller queues. 129 */ 130 mly_initq_free(sc); 131 mly_initq_ready(sc); 132 mly_initq_busy(sc); 133 mly_initq_complete(sc); 134 135#if __FreeBSD_version >= 500005 136 /* 137 * Initialise command-completion task. 138 */ 139 TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc); 140#endif 141 142 /* disable interrupts before we start talking to the controller */ 143 MLY_MASK_INTERRUPTS(sc); 144 145 /* 146 * Wait for the controller to come ready, handshake with the firmware if required. 
147 * This is typically only necessary on platforms where the controller BIOS does not 148 * run. 149 */ 150 if ((error = mly_fwhandshake(sc))) 151 return(error); 152 153 /* 154 * Allocate command buffers 155 */ 156 if ((error = mly_alloc_commands(sc))) 157 return(error); 158 159 /* 160 * Obtain controller feature information 161 */ 162 if ((error = mly_get_controllerinfo(sc))) 163 return(error); 164 165 /* 166 * Get the current event counter for health purposes, populate the initial 167 * health status buffer. 168 */ 169 if ((error = mly_get_eventstatus(sc))) 170 return(error); 171 172 /* 173 * Enable memory-mailbox mode 174 */ 175 if ((error = mly_enable_mmbox(sc))) 176 return(error); 177 178 /* 179 * Attach to CAM. 180 */ 181 if ((error = mly_cam_attach(sc))) 182 return(error); 183 184 /* 185 * Print a little information about the controller 186 */ 187 mly_describe_controller(sc); 188 189 /* 190 * Mark all attached devices for rescan 191 */ 192 mly_scan_devices(sc); 193 194 /* 195 * Instigate the first status poll immediately. Rescan completions won't 196 * happen until interrupts are enabled, which should still be before 197 * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven 198 * discovery here...) 199 */ 200 mly_periodic((void *)sc); 201 202 /* 203 * Create the control device. 204 */ 205 sc->mly_dev_t = make_dev(&mly_cdevsw, device_get_unit(sc->mly_dev), UID_ROOT, GID_OPERATOR, 206 S_IRUSR | S_IWUSR, "mly%d", device_get_unit(sc->mly_dev)); 207 sc->mly_dev_t->si_drv1 = sc; 208 209 /* enable interrupts now */ 210 MLY_UNMASK_INTERRUPTS(sc); 211 212 return(0); 213} 214 215/******************************************************************************** 216 * Bring the controller to a state where it can be safely left alone. 
217 */ 218void 219mly_detach(struct mly_softc *sc) 220{ 221 222 debug_called(1); 223 224 /* kill the periodic event */ 225 untimeout(mly_periodic, sc, sc->mly_periodic); 226 227 sc->mly_state |= MLY_STATE_SUSPEND; 228 229 /* flush controller */ 230 mly_printf(sc, "flushing cache..."); 231 printf("%s\n", mly_flush(sc) ? "failed" : "done"); 232 233 MLY_MASK_INTERRUPTS(sc); 234} 235 236/******************************************************************************** 237 ******************************************************************************** 238 Command Wrappers 239 ******************************************************************************** 240 ********************************************************************************/ 241 242/******************************************************************************** 243 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc. 244 */ 245static int 246mly_get_controllerinfo(struct mly_softc *sc) 247{ 248 struct mly_command_ioctl mci; 249 u_int8_t status; 250 int error; 251 252 debug_called(1); 253 254 if (sc->mly_controllerinfo != NULL) 255 free(sc->mly_controllerinfo, M_DEVBUF); 256 257 /* build the getcontrollerinfo ioctl and send it */ 258 bzero(&mci, sizeof(mci)); 259 sc->mly_controllerinfo = NULL; 260 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO; 261 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo), 262 &status, NULL, NULL))) 263 return(error); 264 if (status != 0) 265 return(EIO); 266 267 if (sc->mly_controllerparam != NULL) 268 free(sc->mly_controllerparam, M_DEVBUF); 269 270 /* build the getcontrollerparameter ioctl and send it */ 271 bzero(&mci, sizeof(mci)); 272 sc->mly_controllerparam = NULL; 273 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER; 274 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam), 275 &status, NULL, NULL))) 276 return(error); 277 if (status != 0) 278 return(EIO); 
279 280 return(0); 281} 282 283/******************************************************************************** 284 * Schedule all possible devices for a rescan. 285 * 286 */ 287static void 288mly_scan_devices(struct mly_softc *sc) 289{ 290 int bus, target, nchn; 291 292 debug_called(1); 293 294 /* 295 * Clear any previous BTL information. 296 */ 297 bzero(&sc->mly_btl, sizeof(sc->mly_btl)); 298 299 /* 300 * Mark all devices as requiring a rescan, and let the early periodic scan collect them. 301 */ 302 nchn = sc->mly_controllerinfo->physical_channels_present + 303 sc->mly_controllerinfo->virtual_channels_present; 304 for (bus = 0; bus < nchn; bus++) 305 for (target = 0; target < MLY_MAX_TARGETS; target++) 306 sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN; 307 308} 309 310/******************************************************************************** 311 * Rescan a device, possibly as a consequence of getting an event which suggests 312 * that it may have changed. 313 */ 314static void 315mly_rescan_btl(struct mly_softc *sc, int bus, int target) 316{ 317 struct mly_command *mc; 318 struct mly_command_ioctl *mci; 319 320 debug_called(2); 321 322 /* get a command */ 323 mc = NULL; 324 if (mly_alloc_command(sc, &mc)) 325 return; /* we'll be retried soon */ 326 327 /* set up the data buffer */ 328 if ((mc->mc_data = malloc(sizeof(union mly_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { 329 mly_release_command(mc); 330 return; /* we'll get retried the next time a command completes */ 331 } 332 mc->mc_flags |= MLY_CMD_DATAIN; 333 mc->mc_complete = mly_complete_rescan; 334 335 sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN; 336 337 /* 338 * Build the ioctl. 339 * 340 * At this point we are committed to sending this request, as it 341 * will be the only one constructed for this particular update. 
342 */ 343 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; 344 mci->opcode = MDACMD_IOCTL; 345 mci->addr.phys.controller = 0; 346 mci->timeout.value = 30; 347 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 348 if (bus >= sc->mly_controllerinfo->physical_channels_present) { 349 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid); 350 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID; 351 mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS) 352 + target; 353 debug(2, "logical device %d", mci->addr.log.logdev); 354 } else { 355 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid); 356 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID; 357 mci->addr.phys.lun = 0; 358 mci->addr.phys.target = target; 359 mci->addr.phys.channel = bus; 360 debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target); 361 } 362 363 /* 364 * Use the ready queue to get this command dispatched. 365 */ 366 mly_enqueue_ready(mc); 367 mly_startio(sc); 368} 369 370/******************************************************************************** 371 * Handle the completion of a rescan operation 372 */ 373static void 374mly_complete_rescan(struct mly_command *mc) 375{ 376 struct mly_softc *sc = mc->mc_sc; 377 struct mly_ioctl_getlogdevinfovalid *ldi; 378 struct mly_ioctl_getphysdevinfovalid *pdi; 379 int bus, target; 380 381 debug_called(2); 382 383 /* iff the command completed OK, we should use the result to update our data */ 384 if (mc->mc_status == 0) { 385 if (mc->mc_length == sizeof(*ldi)) { 386 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data; 387 bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number); 388 target = MLY_LOGDEV_TARGET(ldi->logical_device_number); 389 sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL; /* clears all other flags */ 390 sc->mly_btl[bus][target].mb_type = ldi->raid_level; 391 sc->mly_btl[bus][target].mb_state = ldi->state; 392 debug(2, "BTL 
rescan for %d returns %s, %s", ldi->logical_device_number, 393 mly_describe_code(mly_table_device_type, ldi->raid_level), 394 mly_describe_code(mly_table_device_state, ldi->state)); 395 } else if (mc->mc_length == sizeof(*pdi)) { 396 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data; 397 bus = pdi->channel; 398 target = pdi->target; 399 sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL; /* clears all other flags */ 400 sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL; 401 sc->mly_btl[bus][target].mb_state = pdi->state; 402 sc->mly_btl[bus][target].mb_speed = pdi->speed; 403 sc->mly_btl[bus][target].mb_width = pdi->width; 404 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED) 405 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED; 406 debug(2, "BTL rescan for %d:%d returns %s", bus, target, 407 mly_describe_code(mly_table_device_state, pdi->state)); 408 } else { 409 mly_printf(sc, "BTL rescan result corrupted\n"); 410 } 411 } else { 412 /* 413 * A request sent for a device beyond the last device present will fail. 414 * We don't care about this, so we do nothing about it. 415 */ 416 } 417 free(mc->mc_data, M_DEVBUF); 418 mly_release_command(mc); 419} 420 421/******************************************************************************** 422 * Get the current health status and set the 'next event' counter to suit. 
423 */ 424static int 425mly_get_eventstatus(struct mly_softc *sc) 426{ 427 struct mly_command_ioctl mci; 428 struct mly_health_status *mh; 429 u_int8_t status; 430 int error; 431 432 /* build the gethealthstatus ioctl and send it */ 433 bzero(&mci, sizeof(mci)); 434 mh = NULL; 435 mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS; 436 437 if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL))) 438 return(error); 439 if (status != 0) 440 return(EIO); 441 442 /* get the event counter */ 443 sc->mly_event_change = mh->change_counter; 444 sc->mly_event_waiting = mh->next_event; 445 sc->mly_event_counter = mh->next_event; 446 447 /* save the health status into the memory mailbox */ 448 bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh)); 449 450 debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event); 451 452 free(mh, M_DEVBUF); 453 return(0); 454} 455 456/******************************************************************************** 457 * Enable the memory mailbox mode. 
458 */ 459static int 460mly_enable_mmbox(struct mly_softc *sc) 461{ 462 struct mly_command_ioctl mci; 463 u_int8_t *sp, status; 464 int error; 465 466 debug_called(1); 467 468 /* build the ioctl and send it */ 469 bzero(&mci, sizeof(mci)); 470 mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX; 471 /* set buffer addresses */ 472 mci.param.setmemorymailbox.command_mailbox_physaddr = 473 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command); 474 mci.param.setmemorymailbox.status_mailbox_physaddr = 475 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status); 476 mci.param.setmemorymailbox.health_buffer_physaddr = 477 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health); 478 479 /* set buffer sizes - abuse of data_size field is revolting */ 480 sp = (u_int8_t *)&mci.data_size; 481 sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024); 482 sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024; 483 mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024; 484 485 debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox, 486 mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0], 487 mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1], 488 mci.param.setmemorymailbox.health_buffer_physaddr, 489 mci.param.setmemorymailbox.health_buffer_size); 490 491 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) 492 return(error); 493 if (status != 0) 494 return(EIO); 495 sc->mly_state |= MLY_STATE_MMBOX_ACTIVE; 496 debug(1, "memory mailbox active"); 497 return(0); 498} 499 500/******************************************************************************** 501 * Flush all pending I/O from the controller. 
502 */ 503static int 504mly_flush(struct mly_softc *sc) 505{ 506 struct mly_command_ioctl mci; 507 u_int8_t status; 508 int error; 509 510 debug_called(1); 511 512 /* build the ioctl */ 513 bzero(&mci, sizeof(mci)); 514 mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA; 515 mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER; 516 517 /* pass it off to the controller */ 518 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL))) 519 return(error); 520 521 return((status == 0) ? 0 : EIO); 522} 523 524/******************************************************************************** 525 * Perform an ioctl command. 526 * 527 * If (data) is not NULL, the command requires data transfer. If (*data) is NULL 528 * the command requires data transfer from the controller, and we will allocate 529 * a buffer for it. If (*data) is not NULL, the command requires data transfer 530 * to the controller. 531 * 532 * XXX passing in the whole ioctl structure is ugly. Better ideas? 533 * 534 * XXX we don't even try to handle the case where datasize > 4k. We should. 
535 */ 536static int 537mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize, 538 u_int8_t *status, void *sense_buffer, size_t *sense_length) 539{ 540 struct mly_command *mc; 541 struct mly_command_ioctl *mci; 542 int error; 543 544 debug_called(1); 545 546 mc = NULL; 547 if (mly_alloc_command(sc, &mc)) { 548 error = ENOMEM; 549 goto out; 550 } 551 552 /* copy the ioctl structure, but save some important fields and then fixup */ 553 mci = &mc->mc_packet->ioctl; 554 ioctl->sense_buffer_address = mci->sense_buffer_address; 555 ioctl->maximum_sense_size = mci->maximum_sense_size; 556 *mci = *ioctl; 557 mci->opcode = MDACMD_IOCTL; 558 mci->timeout.value = 30; 559 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 560 561 /* handle the data buffer */ 562 if (data != NULL) { 563 if (*data == NULL) { 564 /* allocate data buffer */ 565 if ((mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT)) == NULL) { 566 error = ENOMEM; 567 goto out; 568 } 569 mc->mc_flags |= MLY_CMD_DATAIN; 570 } else { 571 mc->mc_data = *data; 572 mc->mc_flags |= MLY_CMD_DATAOUT; 573 } 574 mc->mc_length = datasize; 575 mc->mc_packet->generic.data_size = datasize; 576 } 577 578 /* run the command */ 579 if ((error = mly_immediate_command(mc))) 580 goto out; 581 582 /* clean up and return any data */ 583 *status = mc->mc_status; 584 if ((mc->mc_sense > 0) && (sense_buffer != NULL)) { 585 bcopy(mc->mc_packet, sense_buffer, mc->mc_sense); 586 *sense_length = mc->mc_sense; 587 goto out; 588 } 589 590 /* should we return a data pointer? */ 591 if ((data != NULL) && (*data == NULL)) 592 *data = mc->mc_data; 593 594 /* command completed OK */ 595 error = 0; 596 597out: 598 if (mc != NULL) { 599 /* do we need to free a data buffer we allocated? 
*/ 600 if (error && (mc->mc_data != NULL) && (*data == NULL)) 601 free(mc->mc_data, M_DEVBUF); 602 mly_release_command(mc); 603 } 604 return(error); 605} 606 607/******************************************************************************** 608 * Fetch one event from the controller. 609 */ 610static void 611mly_fetch_event(struct mly_softc *sc) 612{ 613 struct mly_command *mc; 614 struct mly_command_ioctl *mci; 615 int s; 616 u_int32_t event; 617 618 debug_called(2); 619 620 /* get a command */ 621 mc = NULL; 622 if (mly_alloc_command(sc, &mc)) 623 return; /* we'll get retried the next time a command completes */ 624 625 /* set up the data buffer */ 626 if ((mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { 627 mly_release_command(mc); 628 return; /* we'll get retried the next time a command completes */ 629 } 630 mc->mc_length = sizeof(struct mly_event); 631 mc->mc_flags |= MLY_CMD_DATAIN; 632 mc->mc_complete = mly_complete_event; 633 634 /* 635 * Get an event number to fetch. It's possible that we've raced with another 636 * context for the last event, in which case there will be no more events. 637 */ 638 s = splcam(); 639 if (sc->mly_event_counter == sc->mly_event_waiting) { 640 mly_release_command(mc); 641 splx(s); 642 return; 643 } 644 event = sc->mly_event_counter++; 645 splx(s); 646 647 /* 648 * Build the ioctl. 649 * 650 * At this point we are committed to sending this request, as it 651 * will be the only one constructed for this particular event number. 
652 */ 653 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl; 654 mci->opcode = MDACMD_IOCTL; 655 mci->data_size = sizeof(struct mly_event); 656 mci->addr.phys.lun = (event >> 16) & 0xff; 657 mci->addr.phys.target = (event >> 24) & 0xff; 658 mci->addr.phys.channel = 0; 659 mci->addr.phys.controller = 0; 660 mci->timeout.value = 30; 661 mci->timeout.scale = MLY_TIMEOUT_SECONDS; 662 mci->sub_ioctl = MDACIOCTL_GETEVENT; 663 mci->param.getevent.sequence_number_low = event & 0xffff; 664 665 debug(2, "fetch event %u", event); 666 667 /* 668 * Use the ready queue to get this command dispatched. 669 */ 670 mly_enqueue_ready(mc); 671 mly_startio(sc); 672} 673 674/******************************************************************************** 675 * Handle the completion of an event poll. 676 * 677 * Note that we don't actually have to instigate another poll; the completion of 678 * this command will trigger that if there are any more events to poll for. 679 */ 680static void 681mly_complete_event(struct mly_command *mc) 682{ 683 struct mly_softc *sc = mc->mc_sc; 684 struct mly_event *me = (struct mly_event *)mc->mc_data; 685 686 debug_called(2); 687 688 /* 689 * If the event was successfully fetched, process it. 690 */ 691 if (mc->mc_status == SCSI_STATUS_OK) { 692 mly_process_event(sc, me); 693 free(me, M_DEVBUF); 694 } 695 mly_release_command(mc); 696} 697 698/******************************************************************************** 699 * Process a controller event. 700 */ 701static void 702mly_process_event(struct mly_softc *sc, struct mly_event *me) 703{ 704 struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0]; 705 char *fp, *tp; 706 int bus, target, event, class, action; 707 708 /* 709 * Errors can be reported using vendor-unique sense data. 
In this case, the 710 * event code will be 0x1c (Request sense data present), the sense key will 711 * be 0x09 (vendor specific), the MSB of the ASC will be set, and the 712 * actual event code will be a 16-bit value comprised of the ASCQ (low byte) 713 * and low seven bits of the ASC (low seven bits of the high byte). 714 */ 715 if ((me->code == 0x1c) && 716 ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) && 717 (ssd->add_sense_code & 0x80)) { 718 event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual; 719 } else { 720 event = me->code; 721 } 722 723 /* look up event, get codes */ 724 fp = mly_describe_code(mly_table_event, event); 725 726 debug(2, "Event %d code 0x%x", me->sequence_number, me->code); 727 728 /* quiet event? */ 729 class = fp[0]; 730 if (isupper(class) && bootverbose) 731 class = tolower(class); 732 733 /* get action code, text string */ 734 action = fp[1]; 735 tp = &fp[2]; 736 737 /* 738 * Print some information about the event. 739 * 740 * This code uses a table derived from the corresponding portion of the Linux 741 * driver, and thus the parser is very similar. 
742 */ 743 switch(class) { 744 case 'p': /* error on physical device */ 745 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); 746 if (action == 'r') 747 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; 748 break; 749 case 'l': /* error on logical unit */ 750 case 'm': /* message about logical unit */ 751 bus = MLY_LOGDEV_BUS(sc, me->lun); 752 target = MLY_LOGDEV_TARGET(me->lun); 753 mly_name_device(sc, bus, target); 754 mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp); 755 if (action == 'r') 756 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN; 757 break; 758 break; 759 case 's': /* report of sense data */ 760 if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) || 761 (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) && 762 (ssd->add_sense_code == 0x04) && 763 ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02)))) 764 break; /* ignore NO_SENSE or NOT_READY in one case */ 765 766 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp); 767 mly_printf(sc, " sense key %d asc %02x ascq %02x\n", 768 ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual); 769 mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, ""); 770 if (action == 'r') 771 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN; 772 break; 773 case 'e': 774 mly_printf(sc, tp, me->target, me->lun); 775 break; 776 case 'c': 777 mly_printf(sc, "controller %s\n", tp); 778 break; 779 case '?': 780 mly_printf(sc, "%s - %d\n", tp, me->code); 781 break; 782 default: /* probably a 'noisy' event being ignored */ 783 break; 784 } 785} 786 787/******************************************************************************** 788 * Perform periodic activities. 789 */ 790static void 791mly_periodic(void *data) 792{ 793 struct mly_softc *sc = (struct mly_softc *)data; 794 int nchn, bus, target; 795 796 debug_called(2); 797 798 /* 799 * Scan devices. 
800 */ 801 nchn = sc->mly_controllerinfo->physical_channels_present + 802 sc->mly_controllerinfo->virtual_channels_present; 803 for (bus = 0; bus < nchn; bus++) { 804 for (target = 0; target < MLY_MAX_TARGETS; target++) { 805 806 /* ignore the controller in this scan */ 807 if (target == sc->mly_controllerparam->initiator_id) 808 continue; 809 810 /* perform device rescan? */ 811 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN) 812 mly_rescan_btl(sc, bus, target); 813 } 814 } 815 816 sc->mly_periodic = timeout(mly_periodic, sc, hz); 817} 818 819/******************************************************************************** 820 ******************************************************************************** 821 Command Processing 822 ******************************************************************************** 823 ********************************************************************************/ 824 825/******************************************************************************** 826 * Run a command and wait for it to complete. 
827 * 828 */ 829static int 830mly_immediate_command(struct mly_command *mc) 831{ 832 struct mly_softc *sc = mc->mc_sc; 833 int error, s; 834 835 debug_called(2); 836 837 /* spinning at splcam is ugly, but we're only used during controller init */ 838 s = splcam(); 839 if ((error = mly_start(mc))) 840 return(error); 841 842 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) { 843 /* sleep on the command */ 844 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { 845 tsleep(mc, PRIBIO, "mlywait", 0); 846 } 847 } else { 848 /* spin and collect status while we do */ 849 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) { 850 mly_done(mc->mc_sc); 851 } 852 } 853 splx(s); 854 return(0); 855} 856 857/******************************************************************************** 858 * Start as much queued I/O as possible on the controller 859 */ 860void 861mly_startio(struct mly_softc *sc) 862{ 863 struct mly_command *mc; 864 865 debug_called(2); 866 867 for (;;) { 868 869 /* try for a ready command */ 870 mc = mly_dequeue_ready(sc); 871 872 /* try to build a command from a queued ccb */ 873 if (!mc) 874 mly_cam_command(sc, &mc); 875 876 /* no command == nothing to do */ 877 if (!mc) 878 break; 879 880 /* try to post the command */ 881 if (mly_start(mc)) { 882 /* controller busy, or no resources - defer for later */ 883 mly_requeue_ready(mc); 884 break; 885 } 886 } 887} 888 889/******************************************************************************** 890 * Deliver a command to the controller; allocate controller resources at the 891 * last moment. 892 */ 893static int 894mly_start(struct mly_command *mc) 895{ 896 struct mly_softc *sc = mc->mc_sc; 897 union mly_command_packet *pkt; 898 int s; 899 900 debug_called(2); 901 902 /* 903 * Set the command up for delivery to the controller. 904 */ 905 mly_map_command(mc); 906 mc->mc_packet->generic.command_id = mc->mc_slot; 907 908 s = splcam(); 909 910 /* 911 * Do we have to use the hardware mailbox? 
912 */ 913 if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) { 914 /* 915 * Check to see if the controller is ready for us. 916 */ 917 if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) { 918 splx(s); 919 return(EBUSY); 920 } 921 mc->mc_flags |= MLY_CMD_BUSY; 922 923 /* 924 * It's ready, send the command. 925 */ 926 MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys); 927 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT); 928 929 } else { /* use memory-mailbox mode */ 930 931 pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index]; 932 933 /* check to see if the next index is free yet */ 934 if (pkt->mmbox.flag != 0) { 935 splx(s); 936 return(EBUSY); 937 } 938 mc->mc_flags |= MLY_CMD_BUSY; 939 940 /* copy in new command */ 941 bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data)); 942 /* barrier to ensure completion of previous write before we write the flag */ 943 bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle? */ 944 /* copy flag last */ 945 pkt->mmbox.flag = mc->mc_packet->mmbox.flag; 946 /* barrier to ensure completion of previous write before we notify the controller */ 947 bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle */ 948 949 /* signal controller, update index */ 950 MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT); 951 sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS; 952 } 953 954 mly_enqueue_busy(mc); 955 splx(s); 956 return(0); 957} 958 959/******************************************************************************** 960 * Pick up command status from the controller, schedule a completion event 961 */ 962void 963mly_done(struct mly_softc *sc) 964{ 965 struct mly_command *mc; 966 union mly_status_packet *sp; 967 u_int16_t slot; 968 int s, worked; 969 970 s = splcam(); 971 worked = 0; 972 973 /* pick up hardware-mailbox commands */ 974 if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) { 975 slot = MLY_GET_REG2(sc, sc->mly_status_mailbox); 976 if (slot < 
MLY_SLOT_MAX) { 977 mc = &sc->mly_command[slot - MLY_SLOT_START]; 978 mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2); 979 mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3); 980 mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4); 981 mly_remove_busy(mc); 982 mc->mc_flags &= ~MLY_CMD_BUSY; 983 mly_enqueue_complete(mc); 984 worked = 1; 985 } else { 986 /* slot 0xffff may mean "extremely bogus command" */ 987 mly_printf(sc, "got HM completion for illegal slot %u\n", slot); 988 } 989 /* unconditionally acknowledge status */ 990 MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY); 991 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); 992 } 993 994 /* pick up memory-mailbox commands */ 995 if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) { 996 for (;;) { 997 sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index]; 998 999 /* check for more status */ 1000 if (sp->mmbox.flag == 0) 1001 break; 1002 1003 /* get slot number */ 1004 slot = sp->status.command_id; 1005 if (slot < MLY_SLOT_MAX) { 1006 mc = &sc->mly_command[slot - MLY_SLOT_START]; 1007 mc->mc_status = sp->status.status; 1008 mc->mc_sense = sp->status.sense_length; 1009 mc->mc_resid = sp->status.residue; 1010 mly_remove_busy(mc); 1011 mc->mc_flags &= ~MLY_CMD_BUSY; 1012 mly_enqueue_complete(mc); 1013 worked = 1; 1014 } else { 1015 /* slot 0xffff may mean "extremely bogus command" */ 1016 mly_printf(sc, "got AM completion for illegal slot %u at %d\n", 1017 slot, sc->mly_mmbox_status_index); 1018 } 1019 1020 /* clear and move to next index */ 1021 sp->mmbox.flag = 0; 1022 sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS; 1023 } 1024 /* acknowledge that we have collected status value(s) */ 1025 MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY); 1026 } 1027 1028 splx(s); 1029 if (worked) { 1030#if __FreeBSD_version >= 500005 1031 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) 1032 taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete); 1033 else 1034#endif 1035 
mly_complete(sc, 0); 1036 } 1037} 1038 1039/******************************************************************************** 1040 * Process completed commands 1041 */ 1042static void 1043mly_complete(void *context, int pending) 1044{ 1045 struct mly_softc *sc = (struct mly_softc *)context; 1046 struct mly_command *mc; 1047 void (* mc_complete)(struct mly_command *mc); 1048 1049 1050 debug_called(2); 1051 1052 /* 1053 * Spin pulling commands off the completed queue and processing them. 1054 */ 1055 while ((mc = mly_dequeue_complete(sc)) != NULL) { 1056 1057 /* 1058 * Free controller resources, mark command complete. 1059 * 1060 * Note that as soon as we mark the command complete, it may be freed 1061 * out from under us, so we need to save the mc_complete field in 1062 * order to later avoid dereferencing mc. (We would not expect to 1063 * have a polling/sleeping consumer with mc_complete != NULL). 1064 */ 1065 mly_unmap_command(mc); 1066 mc_complete = mc->mc_complete; 1067 mc->mc_flags |= MLY_CMD_COMPLETE; 1068 1069 /* 1070 * Call completion handler or wake up sleeping consumer. 1071 */ 1072 if (mc_complete != NULL) { 1073 mc_complete(mc); 1074 } else { 1075 wakeup(mc); 1076 } 1077 } 1078 1079 /* 1080 * We may have freed up controller resources which would allow us 1081 * to push more commands onto the controller, so we check here. 1082 */ 1083 mly_startio(sc); 1084 1085 /* 1086 * The controller may have updated the health status information, 1087 * so check for it here. 1088 * 1089 * Note that we only check for health status after a completed command. It 1090 * might be wise to ping the controller occasionally if it's been idle for 1091 * a while just to check up on it. While a filesystem is mounted, or I/O is 1092 * active this isn't really an issue. 
1093 */ 1094 if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) { 1095 sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter; 1096 debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change, 1097 sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event); 1098 sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event; 1099 1100 /* wake up anyone that might be interested in this */ 1101 wakeup(&sc->mly_event_change); 1102 } 1103 if (sc->mly_event_counter != sc->mly_event_waiting) 1104 mly_fetch_event(sc); 1105} 1106 1107/******************************************************************************** 1108 ******************************************************************************** 1109 Command Buffer Management 1110 ******************************************************************************** 1111 ********************************************************************************/ 1112 1113/******************************************************************************** 1114 * Allocate a command. 1115 */ 1116int 1117mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp) 1118{ 1119 struct mly_command *mc; 1120 1121 debug_called(3); 1122 1123 if ((mc = mly_dequeue_free(sc)) == NULL) 1124 return(ENOMEM); 1125 1126 *mcp = mc; 1127 return(0); 1128} 1129 1130/******************************************************************************** 1131 * Release a command back to the freelist. 1132 */ 1133void 1134mly_release_command(struct mly_command *mc) 1135{ 1136 debug_called(3); 1137 1138 /* 1139 * Fill in parts of the command that may cause confusion if 1140 * a consumer doesn't when we are later allocated. 1141 */ 1142 mc->mc_data = NULL; 1143 mc->mc_flags = 0; 1144 mc->mc_complete = NULL; 1145 mc->mc_private = NULL; 1146 1147 /* 1148 * By default, we set up to overwrite the command packet with 1149 * sense information. 
1150 */ 1151 mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys; 1152 mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet); 1153 1154 mly_enqueue_free(mc); 1155} 1156 1157/******************************************************************************** 1158 * Map helper for command allocation. 1159 */ 1160static void 1161mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1162{ 1163 struct mly_softc *sc = (struct mly_softc *)arg 1164 1165 debug_called(2); 1166 1167 sc->mly_packetphys = segs[0].ds_addr; 1168} 1169 1170/******************************************************************************** 1171 * Allocate and initialise command and packet structures. 1172 */ 1173static int 1174mly_alloc_commands(struct mly_softc *sc) 1175{ 1176 struct mly_command *mc; 1177 int i; 1178 1179 /* 1180 * Allocate enough space for all the command packets in one chunk and 1181 * map them permanently into controller-visible space. 1182 */ 1183 if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet, 1184 BUS_DMA_NOWAIT, &sc->mly_packetmap)) { 1185 return(ENOMEM); 1186 } 1187 bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet, 1188 MLY_MAXCOMMANDS * sizeof(union mly_command_packet), 1189 mly_alloc_commands_map, sc, 0); 1190 1191 for (i = 0; i < MLY_MAXCOMMANDS; i++) { 1192 mc = &sc->mly_command[i]; 1193 bzero(mc, sizeof(*mc)); 1194 mc->mc_sc = sc; 1195 mc->mc_slot = MLY_SLOT_START + i; 1196 mc->mc_packet = sc->mly_packet + i; 1197 mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet)); 1198 if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap)) 1199 mly_release_command(mc); 1200 } 1201 return(0); 1202} 1203 1204/******************************************************************************** 1205 * Command-mapping helper function - populate this command's s/g table 1206 * with the s/g entries for its data. 
1207 */ 1208static void 1209mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1210{ 1211 struct mly_command *mc = (struct mly_command *)arg; 1212 struct mly_softc *sc = mc->mc_sc; 1213 struct mly_command_generic *gen = &(mc->mc_packet->generic); 1214 struct mly_sg_entry *sg; 1215 int i, tabofs; 1216 1217 debug_called(3); 1218 1219 /* can we use the transfer structure directly? */ 1220 if (nseg <= 2) { 1221 sg = &gen->transfer.direct.sg[0]; 1222 gen->command_control.extended_sg_table = 0; 1223 } else { 1224 tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES); 1225 sg = sc->mly_sg_table + tabofs; 1226 gen->transfer.indirect.entries[0] = nseg; 1227 gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry)); 1228 gen->command_control.extended_sg_table = 1; 1229 } 1230 1231 /* copy the s/g table */ 1232 for (i = 0; i < nseg; i++) { 1233 sg[i].physaddr = segs[i].ds_addr; 1234 sg[i].length = segs[i].ds_len; 1235 } 1236 1237} 1238 1239#if 0 1240/******************************************************************************** 1241 * Command-mapping helper function - save the cdb's physical address. 1242 * 1243 * We don't support 'large' SCSI commands at this time, so this is unused. 1244 */ 1245static void 1246mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1247{ 1248 struct mly_command *mc = (struct mly_command *)arg; 1249 1250 debug_called(3); 1251 1252 /* XXX can we safely assume that a CDB will never cross a page boundary? 
*/ 1253 if ((segs[0].ds_addr % PAGE_SIZE) > 1254 ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE)) 1255 panic("cdb crosses page boundary"); 1256 1257 /* fix up fields in the command packet */ 1258 mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr; 1259} 1260#endif 1261 1262/******************************************************************************** 1263 * Map a command into controller-visible space 1264 */ 1265static void 1266mly_map_command(struct mly_command *mc) 1267{ 1268 struct mly_softc *sc = mc->mc_sc; 1269 1270 debug_called(2); 1271 1272 /* don't map more than once */ 1273 if (mc->mc_flags & MLY_CMD_MAPPED) 1274 return; 1275 1276 /* does the command have a data buffer? */ 1277 if (mc->mc_data != NULL) 1278 bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length, 1279 mly_map_command_sg, mc, 0); 1280 1281 if (mc->mc_flags & MLY_CMD_DATAIN) 1282 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD); 1283 if (mc->mc_flags & MLY_CMD_DATAOUT) 1284 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE); 1285 1286 mc->mc_flags |= MLY_CMD_MAPPED; 1287} 1288 1289/******************************************************************************** 1290 * Unmap a command from controller-visible space 1291 */ 1292static void 1293mly_unmap_command(struct mly_command *mc) 1294{ 1295 struct mly_softc *sc = mc->mc_sc; 1296 1297 debug_called(2); 1298 1299 if (!(mc->mc_flags & MLY_CMD_MAPPED)) 1300 return; 1301 1302 if (mc->mc_flags & MLY_CMD_DATAIN) 1303 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD); 1304 if (mc->mc_flags & MLY_CMD_DATAOUT) 1305 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE); 1306 1307 /* does the command have a data buffer? 
*/ 1308 if (mc->mc_data != NULL) 1309 bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap); 1310 1311 mc->mc_flags &= ~MLY_CMD_MAPPED; 1312} 1313 1314/******************************************************************************** 1315 ******************************************************************************** 1316 Hardware Control 1317 ******************************************************************************** 1318 ********************************************************************************/ 1319 1320/******************************************************************************** 1321 * Handshake with the firmware while the card is being initialised. 1322 */ 1323static int 1324mly_fwhandshake(struct mly_softc *sc) 1325{ 1326 u_int8_t error, param0, param1; 1327 int spinup = 0; 1328 1329 debug_called(1); 1330 1331 /* set HM_STSACK and let the firmware initialise */ 1332 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK); 1333 DELAY(1000); /* too short? */ 1334 1335 /* if HM_STSACK is still true, the controller is initialising */ 1336 if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) 1337 return(0); 1338 mly_printf(sc, "controller initialisation started\n"); 1339 1340 /* spin waiting for initialisation to finish, or for a message to be delivered */ 1341 while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) { 1342 /* check for a message */ 1343 if (MLY_ERROR_VALID(sc)) { 1344 error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY; 1345 param0 = MLY_GET_REG(sc, sc->mly_command_mailbox); 1346 param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1); 1347 1348 switch(error) { 1349 case MLY_MSG_SPINUP: 1350 if (!spinup) { 1351 mly_printf(sc, "drive spinup in progress\n"); 1352 spinup = 1; /* only print this once (should print drive being spun?) 
*/ 1353 } 1354 break; 1355 case MLY_MSG_RACE_RECOVERY_FAIL: 1356 mly_printf(sc, "mirror race recovery failed, one or more drives offline\n"); 1357 break; 1358 case MLY_MSG_RACE_IN_PROGRESS: 1359 mly_printf(sc, "mirror race recovery in progress\n"); 1360 break; 1361 case MLY_MSG_RACE_ON_CRITICAL: 1362 mly_printf(sc, "mirror race recovery on a critical drive\n"); 1363 break; 1364 case MLY_MSG_PARITY_ERROR: 1365 mly_printf(sc, "FATAL MEMORY PARITY ERROR\n"); 1366 return(ENXIO); 1367 default: 1368 mly_printf(sc, "unknown initialisation code 0x%x\n", error); 1369 } 1370 } 1371 } 1372 return(0); 1373} 1374 1375/******************************************************************************** 1376 ******************************************************************************** 1377 Debugging and Diagnostics 1378 ******************************************************************************** 1379 ********************************************************************************/ 1380 1381/******************************************************************************** 1382 * Print some information about the controller. 1383 */ 1384static void 1385mly_describe_controller(struct mly_softc *sc) 1386{ 1387 struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo; 1388 1389 mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n", 1390 mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "", 1391 mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? 
*/ 1392 mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day, 1393 mi->memory_size); 1394 1395 if (bootverbose) { 1396 mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n", 1397 mly_describe_code(mly_table_oemname, mi->oem_information), 1398 mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type, 1399 mi->interface_speed, mi->interface_width, mi->interface_name); 1400 mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n", 1401 mi->memory_size, mi->memory_speed, mi->memory_width, 1402 mly_describe_code(mly_table_memorytype, mi->memory_type), 1403 mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "", 1404 mi->cache_size); 1405 mly_printf(sc, "CPU: %s @ %dMHZ\n", 1406 mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed); 1407 if (mi->l2cache_size != 0) 1408 mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size); 1409 if (mi->exmemory_size != 0) 1410 mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n", 1411 mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width, 1412 mly_describe_code(mly_table_memorytype, mi->exmemory_type), 1413 mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": ""); 1414 mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed"); 1415 mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n", 1416 mi->maximum_block_count, mi->maximum_sg_entries); 1417 mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n", 1418 mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline); 1419 mly_printf(sc, "physical devices present %d\n", 1420 mi->physical_devices_present); 1421 mly_printf(sc, "physical disks present/offline %d/%d\n", 1422 mi->physical_disks_present, mi->physical_disks_offline); 1423 mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n", 1424 mi->physical_channels_present, mi->physical_channels_present == 1 ? 
"" : "s", 1425 mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s", 1426 mi->virtual_channels_possible); 1427 mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands); 1428 mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n", 1429 mi->flash_size, mi->flash_age, mi->flash_maximum_age); 1430 } 1431} 1432 1433#ifdef MLY_DEBUG 1434/******************************************************************************** 1435 * Print some controller state 1436 */ 1437static void 1438mly_printstate(struct mly_softc *sc) 1439{ 1440 mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n", 1441 MLY_GET_REG(sc, sc->mly_idbr), 1442 MLY_GET_REG(sc, sc->mly_odbr), 1443 MLY_GET_REG(sc, sc->mly_error_status), 1444 sc->mly_idbr, 1445 sc->mly_odbr, 1446 sc->mly_error_status); 1447 mly_printf(sc, "IMASK %02x ISTATUS %02x\n", 1448 MLY_GET_REG(sc, sc->mly_interrupt_mask), 1449 MLY_GET_REG(sc, sc->mly_interrupt_status)); 1450 mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n", 1451 MLY_GET_REG(sc, sc->mly_command_mailbox), 1452 MLY_GET_REG(sc, sc->mly_command_mailbox + 1), 1453 MLY_GET_REG(sc, sc->mly_command_mailbox + 2), 1454 MLY_GET_REG(sc, sc->mly_command_mailbox + 3), 1455 MLY_GET_REG(sc, sc->mly_command_mailbox + 4), 1456 MLY_GET_REG(sc, sc->mly_command_mailbox + 5), 1457 MLY_GET_REG(sc, sc->mly_command_mailbox + 6), 1458 MLY_GET_REG(sc, sc->mly_command_mailbox + 7)); 1459 mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n", 1460 MLY_GET_REG(sc, sc->mly_status_mailbox), 1461 MLY_GET_REG(sc, sc->mly_status_mailbox + 1), 1462 MLY_GET_REG(sc, sc->mly_status_mailbox + 2), 1463 MLY_GET_REG(sc, sc->mly_status_mailbox + 3), 1464 MLY_GET_REG(sc, sc->mly_status_mailbox + 4), 1465 MLY_GET_REG(sc, sc->mly_status_mailbox + 5), 1466 MLY_GET_REG(sc, sc->mly_status_mailbox + 6), 1467 MLY_GET_REG(sc, sc->mly_status_mailbox + 7)); 1468 mly_printf(sc, " %04x %08x\n", 1469 MLY_GET_REG2(sc, sc->mly_status_mailbox), 1470 
MLY_GET_REG4(sc, sc->mly_status_mailbox + 4)); 1471} 1472 1473struct mly_softc *mly_softc0 = NULL; 1474void 1475mly_printstate0(void) 1476{ 1477 if (mly_softc0 != NULL) 1478 mly_printstate(mly_softc0); 1479} 1480 1481/******************************************************************************** 1482 * Print a command 1483 */ 1484static void 1485mly_print_command(struct mly_command *mc) 1486{ 1487 struct mly_softc *sc = mc->mc_sc; 1488 1489 mly_printf(sc, "COMMAND @ %p\n", mc); 1490 mly_printf(sc, " slot %d\n", mc->mc_slot); 1491 mly_printf(sc, " status 0x%x\n", mc->mc_status); 1492 mly_printf(sc, " sense len %d\n", mc->mc_sense); 1493 mly_printf(sc, " resid %d\n", mc->mc_resid); 1494 mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys); 1495 if (mc->mc_packet != NULL) 1496 mly_print_packet(mc); 1497 mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length); 1498 mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n"); 1499 mly_printf(sc, " complete %p\n", mc->mc_complete); 1500 mly_printf(sc, " private %p\n", mc->mc_private); 1501} 1502 1503/******************************************************************************** 1504 * Print a command packet 1505 */ 1506static void 1507mly_print_packet(struct mly_command *mc) 1508{ 1509 struct mly_softc *sc = mc->mc_sc; 1510 struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet; 1511 struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet; 1512 struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet; 1513 struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet; 1514 int transfer; 1515 1516 mly_printf(sc, " command_id %d\n", ge->command_id); 1517 mly_printf(sc, " opcode %d\n", ge->opcode); 1518 mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n", 1519 ge->command_control.force_unit_access, 1520 ge->command_control.disable_page_out, 1521 
ge->command_control.extended_sg_table, 1522 (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ", 1523 ge->command_control.no_auto_sense, 1524 ge->command_control.disable_disconnect); 1525 mly_printf(sc, " data_size %d\n", ge->data_size); 1526 mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address); 1527 mly_printf(sc, " lun %d\n", ge->addr.phys.lun); 1528 mly_printf(sc, " target %d\n", ge->addr.phys.target); 1529 mly_printf(sc, " channel %d\n", ge->addr.phys.channel); 1530 mly_printf(sc, " logical device %d\n", ge->addr.log.logdev); 1531 mly_printf(sc, " controller %d\n", ge->addr.phys.controller); 1532 mly_printf(sc, " timeout %d %s\n", 1533 ge->timeout.value, 1534 (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" : 1535 ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours")); 1536 mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size); 1537 switch(ge->opcode) { 1538 case MDACMD_SCSIPT: 1539 case MDACMD_SCSI: 1540 mly_printf(sc, " cdb length %d\n", ss->cdb_length); 1541 mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " "); 1542 transfer = 1; 1543 break; 1544 case MDACMD_SCSILC: 1545 case MDACMD_SCSILCPT: 1546 mly_printf(sc, " cdb length %d\n", sl->cdb_length); 1547 mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr); 1548 transfer = 1; 1549 break; 1550 case MDACMD_IOCTL: 1551 mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl); 1552 switch(io->sub_ioctl) { 1553 case MDACIOCTL_SETMEMORYMAILBOX: 1554 mly_printf(sc, " health_buffer_size %d\n", 1555 io->param.setmemorymailbox.health_buffer_size); 1556 mly_printf(sc, " health_buffer_phys 0x%llx\n", 1557 io->param.setmemorymailbox.health_buffer_physaddr); 1558 mly_printf(sc, " command_mailbox 0x%llx\n", 1559 io->param.setmemorymailbox.command_mailbox_physaddr); 1560 mly_printf(sc, " status_mailbox 0x%llx\n", 1561 io->param.setmemorymailbox.status_mailbox_physaddr); 1562 transfer = 0; 1563 break; 1564 1565 case MDACIOCTL_SETREALTIMECLOCK: 1566 case 
MDACIOCTL_GETHEALTHSTATUS: 1567 case MDACIOCTL_GETCONTROLLERINFO: 1568 case MDACIOCTL_GETLOGDEVINFOVALID: 1569 case MDACIOCTL_GETPHYSDEVINFOVALID: 1570 case MDACIOCTL_GETPHYSDEVSTATISTICS: 1571 case MDACIOCTL_GETLOGDEVSTATISTICS: 1572 case MDACIOCTL_GETCONTROLLERSTATISTICS: 1573 case MDACIOCTL_GETBDT_FOR_SYSDRIVE: 1574 case MDACIOCTL_CREATENEWCONF: 1575 case MDACIOCTL_ADDNEWCONF: 1576 case MDACIOCTL_GETDEVCONFINFO: 1577 case MDACIOCTL_GETFREESPACELIST: 1578 case MDACIOCTL_MORE: 1579 case MDACIOCTL_SETPHYSDEVPARAMETER: 1580 case MDACIOCTL_GETPHYSDEVPARAMETER: 1581 case MDACIOCTL_GETLOGDEVPARAMETER: 1582 case MDACIOCTL_SETLOGDEVPARAMETER: 1583 mly_printf(sc, " param %10D\n", io->param.data.param, " "); 1584 transfer = 1; 1585 break; 1586 1587 case MDACIOCTL_GETEVENT: 1588 mly_printf(sc, " event %d\n", 1589 io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16)); 1590 transfer = 1; 1591 break; 1592 1593 case MDACIOCTL_SETRAIDDEVSTATE: 1594 mly_printf(sc, " state %d\n", io->param.setraiddevstate.state); 1595 transfer = 0; 1596 break; 1597 1598 case MDACIOCTL_XLATEPHYSDEVTORAIDDEV: 1599 mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device); 1600 mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller); 1601 mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel); 1602 mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target); 1603 mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun); 1604 transfer = 0; 1605 break; 1606 1607 case MDACIOCTL_GETGROUPCONFINFO: 1608 mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group); 1609 transfer = 1; 1610 break; 1611 1612 case MDACIOCTL_GET_SUBSYSTEM_DATA: 1613 case MDACIOCTL_SET_SUBSYSTEM_DATA: 1614 case MDACIOCTL_STARTDISOCVERY: 1615 case MDACIOCTL_INITPHYSDEVSTART: 1616 case MDACIOCTL_INITPHYSDEVSTOP: 1617 case MDACIOCTL_INITRAIDDEVSTART: 1618 case MDACIOCTL_INITRAIDDEVSTOP: 1619 case 
MDACIOCTL_REBUILDRAIDDEVSTART: 1620 case MDACIOCTL_REBUILDRAIDDEVSTOP: 1621 case MDACIOCTL_MAKECONSISTENTDATASTART: 1622 case MDACIOCTL_MAKECONSISTENTDATASTOP: 1623 case MDACIOCTL_CONSISTENCYCHECKSTART: 1624 case MDACIOCTL_CONSISTENCYCHECKSTOP: 1625 case MDACIOCTL_RESETDEVICE: 1626 case MDACIOCTL_FLUSHDEVICEDATA: 1627 case MDACIOCTL_PAUSEDEVICE: 1628 case MDACIOCTL_UNPAUSEDEVICE: 1629 case MDACIOCTL_LOCATEDEVICE: 1630 case MDACIOCTL_SETMASTERSLAVEMODE: 1631 case MDACIOCTL_DELETERAIDDEV: 1632 case MDACIOCTL_REPLACEINTERNALDEV: 1633 case MDACIOCTL_CLEARCONF: 1634 case MDACIOCTL_GETCONTROLLERPARAMETER: 1635 case MDACIOCTL_SETCONTRLLERPARAMETER: 1636 case MDACIOCTL_CLEARCONFSUSPMODE: 1637 case MDACIOCTL_STOREIMAGE: 1638 case MDACIOCTL_READIMAGE: 1639 case MDACIOCTL_FLASHIMAGES: 1640 case MDACIOCTL_RENAMERAIDDEV: 1641 default: /* no idea what to print */ 1642 transfer = 0; 1643 break; 1644 } 1645 break; 1646 1647 case MDACMD_IOCTLCHECK: 1648 case MDACMD_MEMCOPY: 1649 default: 1650 transfer = 0; 1651 break; /* print nothing */ 1652 } 1653 if (transfer) { 1654 if (ge->command_control.extended_sg_table) { 1655 mly_printf(sc, " sg table 0x%llx/%d\n", 1656 ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]); 1657 } else { 1658 mly_printf(sc, " 0000 0x%llx/%lld\n", 1659 ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length); 1660 mly_printf(sc, " 0001 0x%llx/%lld\n", 1661 ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length); 1662 } 1663 } 1664} 1665 1666/******************************************************************************** 1667 * Panic in a slightly informative fashion 1668 */ 1669static void 1670mly_panic(struct mly_softc *sc, char *reason) 1671{ 1672 mly_printstate(sc); 1673 panic(reason); 1674} 1675#endif 1676 1677/******************************************************************************** 1678 * Print queue statistics, callable from DDB. 
1679 */ 1680void 1681mly_print_controller(int controller) 1682{ 1683 struct mly_softc *sc; 1684 1685 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) { 1686 printf("mly: controller %d invalid\n", controller); 1687 } else { 1688 device_printf(sc->mly_dev, "queue curr max\n"); 1689 device_printf(sc->mly_dev, "free %04d/%04d\n", 1690 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max); 1691 device_printf(sc->mly_dev, "ready %04d/%04d\n", 1692 sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max); 1693 device_printf(sc->mly_dev, "busy %04d/%04d\n", 1694 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max); 1695 device_printf(sc->mly_dev, "complete %04d/%04d\n", 1696 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max); 1697 } 1698} 1699 1700 1701/******************************************************************************** 1702 ******************************************************************************** 1703 Control device interface 1704 ******************************************************************************** 1705 ********************************************************************************/ 1706 1707/******************************************************************************** 1708 * Accept an open operation on the control device. 1709 */ 1710static int 1711mly_user_open(dev_t dev, int flags, int fmt, struct proc *p) 1712{ 1713 int unit = minor(dev); 1714 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); 1715 1716 sc->mly_state |= MLY_STATE_OPEN; 1717 return(0); 1718} 1719 1720/******************************************************************************** 1721 * Accept the last close on the control device. 
1722 */ 1723static int 1724mly_user_close(dev_t dev, int flags, int fmt, struct proc *p) 1725{ 1726 int unit = minor(dev); 1727 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit); 1728 1729 sc->mly_state &= ~MLY_STATE_OPEN; 1730 return (0); 1731} 1732 1733/******************************************************************************** 1734 * Handle controller-specific control operations. 1735 */ 1736static int 1737mly_user_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p) 1738{ 1739 struct mly_softc *sc = (struct mly_softc *)dev->si_drv1; 1740 struct mly_user_command *uc = (struct mly_user_command *)addr; 1741 struct mly_user_health *uh = (struct mly_user_health *)addr; 1742 1743 switch(cmd) { 1744 case MLYIO_COMMAND: 1745 return(mly_user_command(sc, uc)); 1746 case MLYIO_HEALTH: 1747 return(mly_user_health(sc, uh)); 1748 default: 1749 return(ENOIOCTL); 1750 } 1751} 1752 1753/******************************************************************************** 1754 * Execute a command passed in from userspace. 1755 * 1756 * The control structure contains the actual command for the controller, as well 1757 * as the user-space data pointer and data size, and an optional sense buffer 1758 * size/pointer. On completion, the data size is adjusted to the command 1759 * residual, and the sense buffer size to the size of the returned sense data. 1760 * 1761 */ 1762static int 1763mly_user_command(struct mly_softc *sc, struct mly_user_command *uc) 1764{ 1765 struct mly_command *mc; 1766 int error, s; 1767 1768 /* allocate a command */ 1769 if (mly_alloc_command(sc, &mc)) { 1770 error = ENOMEM; 1771 goto out; /* XXX Linux version will wait for a command */ 1772 } 1773 1774 /* handle data size/direction */ 1775 mc->mc_length = (uc->DataTransferLength >= 0) ? 
uc->DataTransferLength : -uc->DataTransferLength; 1776 if (mc->mc_length > 0) { 1777 if ((mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_NOWAIT)) == NULL) { 1778 error = ENOMEM; 1779 goto out; 1780 } 1781 } 1782 if (uc->DataTransferLength > 0) { 1783 mc->mc_flags |= MLY_CMD_DATAIN; 1784 bzero(mc->mc_data, mc->mc_length); 1785 } 1786 if (uc->DataTransferLength < 0) { 1787 mc->mc_flags |= MLY_CMD_DATAOUT; 1788 if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0) 1789 goto out; 1790 } 1791 1792 /* copy the controller command */ 1793 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox)); 1794 1795 /* clear command completion handler so that we get woken up */ 1796 mc->mc_complete = NULL; 1797 1798 /* execute the command */ 1799 s = splcam(); 1800 mly_requeue_ready(mc); 1801 mly_startio(sc); 1802 while (!(mc->mc_flags & MLY_CMD_COMPLETE)) 1803 tsleep(mc, PRIBIO, "mlyioctl", 0); 1804 splx(s); 1805 1806 /* return the data to userspace */ 1807 if (uc->DataTransferLength > 0) 1808 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0) 1809 goto out; 1810 1811 /* return the sense buffer to userspace */ 1812 if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) { 1813 if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer, 1814 min(uc->RequestSenseLength, mc->mc_sense))) != 0) 1815 goto out; 1816 } 1817 1818 /* return command results to userspace (caller will copy out) */ 1819 uc->DataTransferLength = mc->mc_resid; 1820 uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense); 1821 uc->CommandStatus = mc->mc_status; 1822 error = 0; 1823 1824 out: 1825 if (mc->mc_data != NULL) 1826 free(mc->mc_data, M_DEVBUF); 1827 if (mc != NULL) 1828 mly_release_command(mc); 1829 return(error); 1830} 1831 1832/******************************************************************************** 1833 * Return health status to userspace. 
If the health change index in the user 1834 * structure does not match that currently exported by the controller, we 1835 * return the current status immediately. Otherwise, we block until either 1836 * interrupted or new status is delivered. 1837 */ 1838static int 1839mly_user_health(struct mly_softc *sc, struct mly_user_health *uh) 1840{ 1841 struct mly_health_status mh; 1842 int error, s; 1843 1844 /* fetch the current health status from userspace */ 1845 if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0) 1846 return(error); 1847 1848 /* spin waiting for a status update */ 1849 s = splcam(); 1850 error = EWOULDBLOCK; 1851 while ((error != 0) && (sc->mly_event_change == mh.change_counter)) 1852 error = tsleep(&sc->mly_event_change, PRIBIO | PCATCH, "mlyhealth", 0); 1853 splx(s); 1854 1855 /* copy the controller's health status buffer out (there is a race here if it changes again) */ 1856 error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer, 1857 sizeof(uh->HealthStatusBuffer)); 1858 return(error); 1859} 1860