/* ata_da.c — CAM ATA direct-access (ada) disk driver, FreeBSD SVN revision 198897 */
1/*- 2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 198897 2009-11-04 15:24:32Z mav $"); 29 30#include <sys/param.h> 31 32#ifdef _KERNEL 33#include <sys/systm.h> 34#include <sys/kernel.h> 35#include <sys/bio.h> 36#include <sys/sysctl.h> 37#include <sys/taskqueue.h> 38#include <sys/lock.h> 39#include <sys/mutex.h> 40#include <sys/conf.h> 41#include <sys/devicestat.h> 42#include <sys/eventhandler.h> 43#include <sys/malloc.h> 44#include <sys/cons.h> 45#include <geom/geom_disk.h> 46#endif /* _KERNEL */ 47 48#ifndef _KERNEL 49#include <stdio.h> 50#include <string.h> 51#endif /* _KERNEL */ 52 53#include <cam/cam.h> 54#include <cam/cam_ccb.h> 55#include <cam/cam_periph.h> 56#include <cam/cam_xpt_periph.h> 57#include <cam/cam_sim.h> 58 59#include <cam/ata/ata_all.h> 60 61#ifdef _KERNEL 62 63#define ATA_MAX_28BIT_LBA 268435455UL 64 65typedef enum { 66 ADA_STATE_NORMAL 67} ada_state; 68 69typedef enum { 70 ADA_FLAG_PACK_INVALID = 0x001, 71 ADA_FLAG_CAN_48BIT = 0x002, 72 ADA_FLAG_CAN_FLUSHCACHE = 0x004, 73 ADA_FLAG_CAN_NCQ = 0x008, 74 ADA_FLAG_CAN_DMA = 0x010, 75 ADA_FLAG_NEED_OTAG = 0x020, 76 ADA_FLAG_WENT_IDLE = 0x040, 77 ADA_FLAG_OPEN = 0x100, 78 ADA_FLAG_SCTX_INIT = 0x200 79} ada_flags; 80 81typedef enum { 82 ADA_Q_NONE = 0x00 83} ada_quirks; 84 85typedef enum { 86 ADA_CCB_BUFFER_IO = 0x03, 87 ADA_CCB_WAITING = 0x04, 88 ADA_CCB_DUMP = 0x05, 89 ADA_CCB_TYPE_MASK = 0x0F, 90} ada_ccb_state; 91 92/* Offsets into our private area for storing information */ 93#define ccb_state ppriv_field0 94#define ccb_bp ppriv_ptr1 95 96struct disk_params { 97 u_int8_t heads; 98 u_int8_t secs_per_track; 99 u_int32_t cylinders; 100 u_int32_t secsize; /* Number of bytes/logical sector */ 101 u_int64_t sectors; /* Total number sectors */ 102}; 103 104struct ada_softc { 105 struct bio_queue_head bio_queue; 106 ada_state state; 107 ada_flags flags; 108 ada_quirks quirks; 109 int ordered_tag_count; 110 int outstanding_cmds; 111 struct disk_params params; 112 struct 
disk *disk; 113 union ccb saved_ccb; 114 struct task sysctl_task; 115 struct sysctl_ctx_list sysctl_ctx; 116 struct sysctl_oid *sysctl_tree; 117 struct callout sendordered_c; 118}; 119 120struct ada_quirk_entry { 121 struct scsi_inquiry_pattern inq_pat; 122 ada_quirks quirks; 123}; 124 125//static struct ada_quirk_entry ada_quirk_table[] = 126//{ 127//}; 128 129static disk_strategy_t adastrategy; 130static dumper_t adadump; 131static periph_init_t adainit; 132static void adaasync(void *callback_arg, u_int32_t code, 133 struct cam_path *path, void *arg); 134static void adasysctlinit(void *context, int pending); 135static periph_ctor_t adaregister; 136static periph_dtor_t adacleanup; 137static periph_start_t adastart; 138static periph_oninv_t adaoninvalidate; 139static void adadone(struct cam_periph *periph, 140 union ccb *done_ccb); 141static int adaerror(union ccb *ccb, u_int32_t cam_flags, 142 u_int32_t sense_flags); 143static void adagetparams(struct cam_periph *periph, 144 struct ccb_getdev *cgd); 145static timeout_t adasendorderedtag; 146static void adashutdown(void *arg, int howto); 147 148#ifndef ADA_DEFAULT_TIMEOUT 149#define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ 150#endif 151 152#ifndef ADA_DEFAULT_RETRY 153#define ADA_DEFAULT_RETRY 4 154#endif 155 156#ifndef ADA_DEFAULT_SEND_ORDERED 157#define ADA_DEFAULT_SEND_ORDERED 1 158#endif 159 160 161static int ada_retry_count = ADA_DEFAULT_RETRY; 162static int ada_default_timeout = ADA_DEFAULT_TIMEOUT; 163static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED; 164 165SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, 166 "CAM Direct Access Disk driver"); 167SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW, 168 &ada_retry_count, 0, "Normal I/O retry count"); 169TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count); 170SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW, 171 &ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); 
172TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout); 173SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW, 174 &ada_send_ordered, 0, "Send Ordered Tags"); 175TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered); 176 177/* 178 * ADA_ORDEREDTAG_INTERVAL determines how often, relative 179 * to the default timeout, we check to see whether an ordered 180 * tagged transaction is appropriate to prevent simple tag 181 * starvation. Since we'd like to ensure that there is at least 182 * 1/2 of the timeout length left for a starved transaction to 183 * complete after we've sent an ordered tag, we must poll at least 184 * four times in every timeout period. This takes care of the worst 185 * case where a starved transaction starts during an interval that 186 * meets the requirement "don't send an ordered tag" test so it takes 187 * us two intervals to determine that a tag must be sent. 188 */ 189#ifndef ADA_ORDEREDTAG_INTERVAL 190#define ADA_ORDEREDTAG_INTERVAL 4 191#endif 192 193static struct periph_driver adadriver = 194{ 195 adainit, "ada", 196 TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0 197}; 198 199PERIPHDRIVER_DECLARE(ada, adadriver); 200 201MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers"); 202 203static int 204adaopen(struct disk *dp) 205{ 206 struct cam_periph *periph; 207 struct ada_softc *softc; 208 int unit; 209 int error; 210 211 periph = (struct cam_periph *)dp->d_drv1; 212 if (periph == NULL) { 213 return (ENXIO); 214 } 215 216 if (cam_periph_acquire(periph) != CAM_REQ_CMP) { 217 return(ENXIO); 218 } 219 220 cam_periph_lock(periph); 221 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { 222 cam_periph_unlock(periph); 223 cam_periph_release(periph); 224 return (error); 225 } 226 227 unit = periph->unit_number; 228 softc = (struct ada_softc *)periph->softc; 229 softc->flags |= ADA_FLAG_OPEN; 230 231 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 232 ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, 
dp->d_unit, 233 unit)); 234 235 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) { 236 /* Invalidate our pack information. */ 237 softc->flags &= ~ADA_FLAG_PACK_INVALID; 238 } 239 240 cam_periph_unhold(periph); 241 cam_periph_unlock(periph); 242 return (0); 243} 244 245static int 246adaclose(struct disk *dp) 247{ 248 struct cam_periph *periph; 249 struct ada_softc *softc; 250 union ccb *ccb; 251 int error; 252 253 periph = (struct cam_periph *)dp->d_drv1; 254 if (periph == NULL) 255 return (ENXIO); 256 257 cam_periph_lock(periph); 258 if ((error = cam_periph_hold(periph, PRIBIO)) != 0) { 259 cam_periph_unlock(periph); 260 cam_periph_release(periph); 261 return (error); 262 } 263 264 softc = (struct ada_softc *)periph->softc; 265 /* We only sync the cache if the drive is capable of it. */ 266 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) { 267 268 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 269 cam_fill_ataio(&ccb->ataio, 270 1, 271 adadone, 272 CAM_DIR_NONE, 273 0, 274 NULL, 275 0, 276 ada_default_timeout*1000); 277 278 if (softc->flags & ADA_FLAG_CAN_48BIT) 279 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); 280 else 281 ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); 282 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, 283 /*sense_flags*/0, softc->disk->d_devstat); 284 285 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 286 xpt_print(periph->path, "Synchronize cache failed\n"); 287 288 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 289 cam_release_devq(ccb->ccb_h.path, 290 /*relsim_flags*/0, 291 /*reduction*/0, 292 /*timeout*/0, 293 /*getcount_only*/0); 294 xpt_release_ccb(ccb); 295 } 296 297 softc->flags &= ~ADA_FLAG_OPEN; 298 cam_periph_unhold(periph); 299 cam_periph_unlock(periph); 300 cam_periph_release(periph); 301 return (0); 302} 303 304/* 305 * Actually translate the requested transfer into one the physical driver 306 * can understand. 
The transfer is described by a buf and will include 307 * only one physical transfer. 308 */ 309static void 310adastrategy(struct bio *bp) 311{ 312 struct cam_periph *periph; 313 struct ada_softc *softc; 314 315 periph = (struct cam_periph *)bp->bio_disk->d_drv1; 316 if (periph == NULL) { 317 biofinish(bp, NULL, ENXIO); 318 return; 319 } 320 softc = (struct ada_softc *)periph->softc; 321 322 cam_periph_lock(periph); 323 324 /* 325 * If the device has been made invalid, error out 326 */ 327 if ((softc->flags & ADA_FLAG_PACK_INVALID)) { 328 cam_periph_unlock(periph); 329 biofinish(bp, NULL, ENXIO); 330 return; 331 } 332 333 /* 334 * Place it in the queue of disk activities for this disk 335 */ 336 bioq_disksort(&softc->bio_queue, bp); 337 338 /* 339 * Schedule ourselves for performing the work. 340 */ 341 xpt_schedule(periph, CAM_PRIORITY_NORMAL); 342 cam_periph_unlock(periph); 343 344 return; 345} 346 347static int 348adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) 349{ 350 struct cam_periph *periph; 351 struct ada_softc *softc; 352 u_int secsize; 353 union ccb ccb; 354 struct disk *dp; 355 uint64_t lba; 356 uint16_t count; 357 358 dp = arg; 359 periph = dp->d_drv1; 360 if (periph == NULL) 361 return (ENXIO); 362 softc = (struct ada_softc *)periph->softc; 363 cam_periph_lock(periph); 364 secsize = softc->params.secsize; 365 lba = offset / secsize; 366 count = length / secsize; 367 368 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) { 369 cam_periph_unlock(periph); 370 return (ENXIO); 371 } 372 373 if (length > 0) { 374 periph->flags |= CAM_PERIPH_POLLED; 375 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 376 ccb.ccb_h.ccb_state = ADA_CCB_DUMP; 377 cam_fill_ataio(&ccb.ataio, 378 0, 379 adadone, 380 CAM_DIR_OUT, 381 0, 382 (u_int8_t *) virtual, 383 length, 384 ada_default_timeout*1000); 385 if ((softc->flags & ADA_FLAG_CAN_48BIT) && 386 (lba + count >= ATA_MAX_28BIT_LBA || 387 count >= 256)) { 388 
ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48, 389 0, lba, count); 390 } else { 391 ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA, 392 0, lba, count); 393 } 394 xpt_polled_action(&ccb); 395 396 if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 397 printf("Aborting dump due to I/O error.\n"); 398 cam_periph_unlock(periph); 399 return(EIO); 400 } 401 cam_periph_unlock(periph); 402 return(0); 403 } 404 405 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) { 406 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 407 408 ccb.ccb_h.ccb_state = ADA_CCB_DUMP; 409 cam_fill_ataio(&ccb.ataio, 410 1, 411 adadone, 412 CAM_DIR_NONE, 413 0, 414 NULL, 415 0, 416 ada_default_timeout*1000); 417 418 if (softc->flags & ADA_FLAG_CAN_48BIT) 419 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0); 420 else 421 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0); 422 xpt_polled_action(&ccb); 423 424 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 425 xpt_print(periph->path, "Synchronize cache failed\n"); 426 427 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) 428 cam_release_devq(ccb.ccb_h.path, 429 /*relsim_flags*/0, 430 /*reduction*/0, 431 /*timeout*/0, 432 /*getcount_only*/0); 433 } 434 periph->flags &= ~CAM_PERIPH_POLLED; 435 cam_periph_unlock(periph); 436 return (0); 437} 438 439static void 440adainit(void) 441{ 442 cam_status status; 443 444 /* 445 * Install a global async callback. This callback will 446 * receive async callbacks like "new device found". 
447 */ 448 status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL); 449 450 if (status != CAM_REQ_CMP) { 451 printf("ada: Failed to attach master async callback " 452 "due to status 0x%x!\n", status); 453 } else if (ada_send_ordered) { 454 455 /* Register our shutdown event handler */ 456 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown, 457 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) 458 printf("adainit: shutdown event registration failed!\n"); 459 } 460} 461 462static void 463adaoninvalidate(struct cam_periph *periph) 464{ 465 struct ada_softc *softc; 466 467 softc = (struct ada_softc *)periph->softc; 468 469 /* 470 * De-register any async callbacks. 471 */ 472 xpt_register_async(0, adaasync, periph, periph->path); 473 474 softc->flags |= ADA_FLAG_PACK_INVALID; 475 476 /* 477 * Return all queued I/O with ENXIO. 478 * XXX Handle any transactions queued to the card 479 * with XPT_ABORT_CCB. 480 */ 481 bioq_flush(&softc->bio_queue, NULL, ENXIO); 482 483 disk_gone(softc->disk); 484 xpt_print(periph->path, "lost device\n"); 485} 486 487static void 488adacleanup(struct cam_periph *periph) 489{ 490 struct ada_softc *softc; 491 492 softc = (struct ada_softc *)periph->softc; 493 494 xpt_print(periph->path, "removing device entry\n"); 495 cam_periph_unlock(periph); 496 497 /* 498 * If we can't free the sysctl tree, oh well... 
499 */ 500 if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0 501 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) { 502 xpt_print(periph->path, "can't remove sysctl context\n"); 503 } 504 505 disk_destroy(softc->disk); 506 callout_drain(&softc->sendordered_c); 507 free(softc, M_DEVBUF); 508 cam_periph_lock(periph); 509} 510 511static void 512adaasync(void *callback_arg, u_int32_t code, 513 struct cam_path *path, void *arg) 514{ 515 struct cam_periph *periph; 516 517 periph = (struct cam_periph *)callback_arg; 518 switch (code) { 519 case AC_FOUND_DEVICE: 520 { 521 struct ccb_getdev *cgd; 522 cam_status status; 523 524 cgd = (struct ccb_getdev *)arg; 525 if (cgd == NULL) 526 break; 527 528 if (cgd->protocol != PROTO_ATA) 529 break; 530 531 /* 532 * Allocate a peripheral instance for 533 * this device and start the probe 534 * process. 535 */ 536 status = cam_periph_alloc(adaregister, adaoninvalidate, 537 adacleanup, adastart, 538 "ada", CAM_PERIPH_BIO, 539 cgd->ccb_h.path, adaasync, 540 AC_FOUND_DEVICE, cgd); 541 542 if (status != CAM_REQ_CMP 543 && status != CAM_REQ_INPROG) 544 printf("adaasync: Unable to attach to new device " 545 "due to status 0x%x\n", status); 546 break; 547 } 548 default: 549 cam_periph_async(periph, code, path, arg); 550 break; 551 } 552} 553 554static void 555adasysctlinit(void *context, int pending) 556{ 557 struct cam_periph *periph; 558 struct ada_softc *softc; 559 char tmpstr[80], tmpstr2[80]; 560 561 periph = (struct cam_periph *)context; 562 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 563 return; 564 565 softc = (struct ada_softc *)periph->softc; 566 snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number); 567 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); 568 569 sysctl_ctx_init(&softc->sysctl_ctx); 570 softc->flags |= ADA_FLAG_SCTX_INIT; 571 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 572 SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2, 573 CTLFLAG_RD, 0, tmpstr); 574 if 
(softc->sysctl_tree == NULL) { 575 printf("adasysctlinit: unable to allocate sysctl tree\n"); 576 cam_periph_release(periph); 577 return; 578 } 579 580 cam_periph_release(periph); 581} 582 583static cam_status 584adaregister(struct cam_periph *periph, void *arg) 585{ 586 struct ada_softc *softc; 587 struct ccb_pathinq cpi; 588 struct ccb_getdev *cgd; 589 char announce_buf[80]; 590 struct disk_params *dp; 591 caddr_t match; 592 u_int maxio; 593 594 cgd = (struct ccb_getdev *)arg; 595 if (periph == NULL) { 596 printf("adaregister: periph was NULL!!\n"); 597 return(CAM_REQ_CMP_ERR); 598 } 599 600 if (cgd == NULL) { 601 printf("adaregister: no getdev CCB, can't register device\n"); 602 return(CAM_REQ_CMP_ERR); 603 } 604 605 softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF, 606 M_NOWAIT|M_ZERO); 607 608 if (softc == NULL) { 609 printf("adaregister: Unable to probe new device. " 610 "Unable to allocate softc\n"); 611 return(CAM_REQ_CMP_ERR); 612 } 613 614 bioq_init(&softc->bio_queue); 615 616 if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) 617 softc->flags |= ADA_FLAG_CAN_DMA; 618 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) 619 softc->flags |= ADA_FLAG_CAN_48BIT; 620 if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE) 621 softc->flags |= ADA_FLAG_CAN_FLUSHCACHE; 622 if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ && 623 cgd->ident_data.queue >= 31) 624 softc->flags |= ADA_FLAG_CAN_NCQ; 625 softc->state = ADA_STATE_NORMAL; 626 627 periph->softc = softc; 628 629 /* 630 * See if this device has any quirks. 
631 */ 632// match = cam_quirkmatch((caddr_t)&cgd->inq_data, 633// (caddr_t)ada_quirk_table, 634// sizeof(ada_quirk_table)/sizeof(*ada_quirk_table), 635// sizeof(*ada_quirk_table), scsi_inquiry_match); 636 match = NULL; 637 638 if (match != NULL) 639 softc->quirks = ((struct ada_quirk_entry *)match)->quirks; 640 else 641 softc->quirks = ADA_Q_NONE; 642 643 /* Check if the SIM does not want queued commands */ 644 bzero(&cpi, sizeof(cpi)); 645 xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 646 cpi.ccb_h.func_code = XPT_PATH_INQ; 647 xpt_action((union ccb *)&cpi); 648 if (cpi.ccb_h.status != CAM_REQ_CMP || 649 (cpi.hba_inquiry & PI_TAG_ABLE) == 0) 650 softc->flags &= ~ADA_FLAG_CAN_NCQ; 651 652 TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph); 653 654 /* 655 * Register this media as a disk 656 */ 657 mtx_unlock(periph->sim->mtx); 658 adagetparams(periph, cgd); 659 softc->disk = disk_alloc(); 660 softc->disk->d_open = adaopen; 661 softc->disk->d_close = adaclose; 662 softc->disk->d_strategy = adastrategy; 663 softc->disk->d_dump = adadump; 664 softc->disk->d_name = "ada"; 665 softc->disk->d_drv1 = periph; 666 maxio = cpi.maxio; /* Honor max I/O size of SIM */ 667 if (maxio == 0) 668 maxio = DFLTPHYS; /* traditional default */ 669 else if (maxio > MAXPHYS) 670 maxio = MAXPHYS; /* for safety */ 671 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) 672 maxio = min(maxio, 65536 * softc->params.secsize); 673 else /* 28bit ATA command limit */ 674 maxio = min(maxio, 256 * softc->params.secsize); 675 softc->disk->d_maxsize = maxio; 676 softc->disk->d_unit = periph->unit_number; 677 softc->disk->d_flags = 0; 678 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) 679 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; 680 strlcpy(softc->disk->d_ident, cgd->serial_num, 681 MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1)); 682 683 softc->disk->d_sectorsize = softc->params.secsize; 684 softc->disk->d_mediasize = (off_t)softc->params.sectors * 685 
softc->params.secsize; 686 softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data); 687 softc->disk->d_stripeoffset = softc->disk->d_stripesize - 688 ata_logical_sector_offset(&cgd->ident_data); 689 /* XXX: these are not actually "firmware" values, so they may be wrong */ 690 softc->disk->d_fwsectors = softc->params.secs_per_track; 691 softc->disk->d_fwheads = softc->params.heads; 692 693 disk_create(softc->disk, DISK_VERSION); 694 mtx_lock(periph->sim->mtx); 695 696 dp = &softc->params; 697 snprintf(announce_buf, sizeof(announce_buf), 698 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)", 699 (uintmax_t)(((uintmax_t)dp->secsize * 700 dp->sectors) / (1024*1024)), 701 (uintmax_t)dp->sectors, 702 dp->secsize, dp->heads, 703 dp->secs_per_track, dp->cylinders); 704 xpt_announce_periph(periph, announce_buf); 705 if (softc->flags & ADA_FLAG_CAN_NCQ) { 706 printf("%s%d: Native Command Queueing enabled\n", 707 periph->periph_name, periph->unit_number); 708 } 709 710 /* 711 * Add async callbacks for bus reset and 712 * bus device reset calls. I don't bother 713 * checking if this fails as, in most cases, 714 * the system will function just fine without 715 * them and the only alternative would be to 716 * not attach the device on failure. 717 */ 718 xpt_register_async(AC_LOST_DEVICE, 719 adaasync, periph, periph->path); 720 721 /* 722 * Schedule a periodic event to occasionally send an 723 * ordered tag to a device. 
724 */ 725 callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0); 726 callout_reset(&softc->sendordered_c, 727 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL, 728 adasendorderedtag, softc); 729 730 return(CAM_REQ_CMP); 731} 732 733static void 734adastart(struct cam_periph *periph, union ccb *start_ccb) 735{ 736 struct ada_softc *softc = (struct ada_softc *)periph->softc; 737 struct ccb_ataio *ataio = &start_ccb->ataio; 738 739 switch (softc->state) { 740 case ADA_STATE_NORMAL: 741 { 742 /* Pull a buffer from the queue and get going on it */ 743 struct bio *bp; 744 745 /* 746 * See if there is a buf with work for us to do.. 747 */ 748 bp = bioq_first(&softc->bio_queue); 749 if (periph->immediate_priority <= periph->pinfo.priority) { 750 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 751 ("queuing for immediate ccb\n")); 752 start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING; 753 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 754 periph_links.sle); 755 periph->immediate_priority = CAM_PRIORITY_NONE; 756 wakeup(&periph->ccb_list); 757 } else if (bp == NULL) { 758 xpt_release_ccb(start_ccb); 759 } else { 760 u_int8_t tag_code; 761 762 bioq_remove(&softc->bio_queue, bp); 763 764 if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) { 765 softc->flags &= ~ADA_FLAG_NEED_OTAG; 766 softc->ordered_tag_count++; 767 tag_code = 0; 768 } else { 769 tag_code = 1; 770 } 771 switch (bp->bio_cmd) { 772 case BIO_READ: 773 case BIO_WRITE: 774 { 775 uint64_t lba = bp->bio_pblkno; 776 uint16_t count = bp->bio_bcount / softc->params.secsize; 777 778 cam_fill_ataio(ataio, 779 ada_retry_count, 780 adadone, 781 bp->bio_cmd == BIO_READ ? 
782 CAM_DIR_IN : CAM_DIR_OUT, 783 tag_code, 784 bp->bio_data, 785 bp->bio_bcount, 786 ada_default_timeout*1000); 787 788 if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) { 789 if (bp->bio_cmd == BIO_READ) { 790 ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED, 791 lba, count); 792 } else { 793 ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED, 794 lba, count); 795 } 796 } else if ((softc->flags & ADA_FLAG_CAN_48BIT) && 797 (lba + count >= ATA_MAX_28BIT_LBA || 798 count > 256)) { 799 if (softc->flags & ADA_FLAG_CAN_DMA) { 800 if (bp->bio_cmd == BIO_READ) { 801 ata_48bit_cmd(ataio, ATA_READ_DMA48, 802 0, lba, count); 803 } else { 804 ata_48bit_cmd(ataio, ATA_WRITE_DMA48, 805 0, lba, count); 806 } 807 } else { 808 if (bp->bio_cmd == BIO_READ) { 809 ata_48bit_cmd(ataio, ATA_READ_MUL48, 810 0, lba, count); 811 } else { 812 ata_48bit_cmd(ataio, ATA_WRITE_MUL48, 813 0, lba, count); 814 } 815 } 816 } else { 817 if (count == 256) 818 count = 0; 819 if (softc->flags & ADA_FLAG_CAN_DMA) { 820 if (bp->bio_cmd == BIO_READ) { 821 ata_28bit_cmd(ataio, ATA_READ_DMA, 822 0, lba, count); 823 } else { 824 ata_28bit_cmd(ataio, ATA_WRITE_DMA, 825 0, lba, count); 826 } 827 } else { 828 if (bp->bio_cmd == BIO_READ) { 829 ata_28bit_cmd(ataio, ATA_READ_MUL, 830 0, lba, count); 831 } else { 832 ata_28bit_cmd(ataio, ATA_WRITE_MUL, 833 0, lba, count); 834 } 835 } 836 } 837 } 838 break; 839 case BIO_FLUSH: 840 cam_fill_ataio(ataio, 841 1, 842 adadone, 843 CAM_DIR_NONE, 844 0, 845 NULL, 846 0, 847 ada_default_timeout*1000); 848 849 if (softc->flags & ADA_FLAG_CAN_48BIT) 850 ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0); 851 else 852 ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0); 853 break; 854 } 855 start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; 856 start_ccb->ccb_h.ccb_bp = bp; 857 softc->outstanding_cmds++; 858 xpt_action(start_ccb); 859 bp = bioq_first(&softc->bio_queue); 860 } 861 862 if (bp != NULL) { 863 /* Have more work to do, so ensure we stay scheduled */ 864 xpt_schedule(periph, 
CAM_PRIORITY_NORMAL); 865 } 866 break; 867 } 868 } 869} 870 871static void 872adadone(struct cam_periph *periph, union ccb *done_ccb) 873{ 874 struct ada_softc *softc; 875 struct ccb_ataio *ataio; 876 877 softc = (struct ada_softc *)periph->softc; 878 ataio = &done_ccb->ataio; 879 switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) { 880 case ADA_CCB_BUFFER_IO: 881 { 882 struct bio *bp; 883 884 bp = (struct bio *)done_ccb->ccb_h.ccb_bp; 885 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 886 int error; 887 888 error = adaerror(done_ccb, 0, 0); 889 if (error == ERESTART) { 890 /* A retry was scheduled, so just return. */ 891 return; 892 } 893 if (error != 0) { 894 if (error == ENXIO) { 895 /* 896 * Catastrophic error. Mark our pack as 897 * invalid. 898 */ 899 /* 900 * XXX See if this is really a media 901 * XXX change first? 902 */ 903 xpt_print(periph->path, 904 "Invalidating pack\n"); 905 softc->flags |= ADA_FLAG_PACK_INVALID; 906 } 907 908 /* 909 * return all queued I/O with EIO, so that 910 * the client can retry these I/Os in the 911 * proper order should it attempt to recover. 
912 */ 913 bioq_flush(&softc->bio_queue, NULL, EIO); 914 bp->bio_error = error; 915 bp->bio_resid = bp->bio_bcount; 916 bp->bio_flags |= BIO_ERROR; 917 } else { 918 bp->bio_resid = ataio->resid; 919 bp->bio_error = 0; 920 if (bp->bio_resid != 0) 921 bp->bio_flags |= BIO_ERROR; 922 } 923 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 924 cam_release_devq(done_ccb->ccb_h.path, 925 /*relsim_flags*/0, 926 /*reduction*/0, 927 /*timeout*/0, 928 /*getcount_only*/0); 929 } else { 930 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 931 panic("REQ_CMP with QFRZN"); 932 bp->bio_resid = ataio->resid; 933 if (ataio->resid > 0) 934 bp->bio_flags |= BIO_ERROR; 935 } 936 softc->outstanding_cmds--; 937 if (softc->outstanding_cmds == 0) 938 softc->flags |= ADA_FLAG_WENT_IDLE; 939 940 biodone(bp); 941 break; 942 } 943 case ADA_CCB_WAITING: 944 { 945 /* Caller will release the CCB */ 946 wakeup(&done_ccb->ccb_h.cbfcnp); 947 return; 948 } 949 case ADA_CCB_DUMP: 950 /* No-op. We're polling */ 951 return; 952 default: 953 break; 954 } 955 xpt_release_ccb(done_ccb); 956} 957 958static int 959adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 960{ 961 struct ada_softc *softc; 962 struct cam_periph *periph; 963 964 periph = xpt_path_periph(ccb->ccb_h.path); 965 softc = (struct ada_softc *)periph->softc; 966 967 return(cam_periph_error(ccb, cam_flags, sense_flags, 968 &softc->saved_ccb)); 969} 970 971static void 972adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd) 973{ 974 struct ada_softc *softc = (struct ada_softc *)periph->softc; 975 struct disk_params *dp = &softc->params; 976 u_int64_t lbasize48; 977 u_int32_t lbasize; 978 979 dp->secsize = ata_logical_sector_size(&cgd->ident_data); 980 if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) && 981 cgd->ident_data.current_heads && cgd->ident_data.current_sectors) { 982 dp->heads = cgd->ident_data.current_heads; 983 dp->secs_per_track = cgd->ident_data.current_sectors; 984 dp->cylinders = 
cgd->ident_data.cylinders; 985 dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 | 986 ((u_int32_t)cgd->ident_data.current_size_2 << 16); 987 } else { 988 dp->heads = cgd->ident_data.heads; 989 dp->secs_per_track = cgd->ident_data.sectors; 990 dp->cylinders = cgd->ident_data.cylinders; 991 dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track; 992 } 993 lbasize = (u_int32_t)cgd->ident_data.lba_size_1 | 994 ((u_int32_t)cgd->ident_data.lba_size_2 << 16); 995 996 /* use the 28bit LBA size if valid or bigger than the CHS mapping */ 997 if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize) 998 dp->sectors = lbasize; 999 1000 /* use the 48bit LBA size if valid */ 1001 lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) | 1002 ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) | 1003 ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) | 1004 ((u_int64_t)cgd->ident_data.lba_size48_4 << 48); 1005 if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) && 1006 lbasize48 > ATA_MAX_28BIT_LBA) 1007 dp->sectors = lbasize48; 1008} 1009 1010static void 1011adasendorderedtag(void *arg) 1012{ 1013 struct ada_softc *softc = arg; 1014 1015 if (ada_send_ordered) { 1016 if ((softc->ordered_tag_count == 0) 1017 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) { 1018 softc->flags |= ADA_FLAG_NEED_OTAG; 1019 } 1020 if (softc->outstanding_cmds > 0) 1021 softc->flags &= ~ADA_FLAG_WENT_IDLE; 1022 1023 softc->ordered_tag_count = 0; 1024 } 1025 /* Queue us up again */ 1026 callout_reset(&softc->sendordered_c, 1027 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL, 1028 adasendorderedtag, softc); 1029} 1030 1031/* 1032 * Step through all ADA peripheral drivers, and if the device is still open, 1033 * sync the disk cache to physical media. 
1034 */ 1035static void 1036adashutdown(void * arg, int howto) 1037{ 1038 struct cam_periph *periph; 1039 struct ada_softc *softc; 1040 1041 TAILQ_FOREACH(periph, &adadriver.units, unit_links) { 1042 union ccb ccb; 1043 1044 cam_periph_lock(periph); 1045 softc = (struct ada_softc *)periph->softc; 1046 /* 1047 * We only sync the cache if the drive is still open, and 1048 * if the drive is capable of it.. 1049 */ 1050 if (((softc->flags & ADA_FLAG_OPEN) == 0) || 1051 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) { 1052 cam_periph_unlock(periph); 1053 continue; 1054 } 1055 1056 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1057 1058 ccb.ccb_h.ccb_state = ADA_CCB_DUMP; 1059 cam_fill_ataio(&ccb.ataio, 1060 1, 1061 adadone, 1062 CAM_DIR_NONE, 1063 0, 1064 NULL, 1065 0, 1066 ada_default_timeout*1000); 1067 1068 if (softc->flags & ADA_FLAG_CAN_48BIT) 1069 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0); 1070 else 1071 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0); 1072 xpt_polled_action(&ccb); 1073 1074 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 1075 xpt_print(periph->path, "Synchronize cache failed\n"); 1076 1077 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) 1078 cam_release_devq(ccb.ccb_h.path, 1079 /*relsim_flags*/0, 1080 /*reduction*/0, 1081 /*timeout*/0, 1082 /*getcount_only*/0); 1083 cam_periph_unlock(periph); 1084 } 1085} 1086 1087#endif /* _KERNEL */ 1088