scsi_sa.c revision 41297
1/* 2 * Implementation of SCSI Sequential Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 * 28 * $Id: scsi_sa.c,v 1.4 1998/10/22 22:16:56 ken Exp $ 29 */ 30 31#include <sys/param.h> 32#include <sys/queue.h> 33#ifdef KERNEL 34#include <sys/systm.h> 35#include <sys/kernel.h> 36#endif 37#include <sys/types.h> 38#include <sys/buf.h> 39#include <sys/malloc.h> 40#include <sys/mtio.h> 41#include <sys/conf.h> 42#include <sys/buf.h> 43#include <sys/devicestat.h> 44#include <machine/limits.h> 45 46#ifndef KERNEL 47#include <stdio.h> 48#include <string.h> 49#endif 50 51#include <cam/cam.h> 52#include <cam/cam_ccb.h> 53#include <cam/cam_extend.h> 54#include <cam/cam_periph.h> 55#include <cam/cam_xpt_periph.h> 56#include <cam/cam_debug.h> 57 58#include <cam/scsi/scsi_all.h> 59#include <cam/scsi/scsi_message.h> 60#include <cam/scsi/scsi_sa.h> 61 62#ifdef KERNEL 63 64#include <opt_sa.h> 65 66#ifndef SA_SPACE_TIMEOUT 67#define SA_SPACE_TIMEOUT 1 * 60 68#endif 69#ifndef SA_REWIND_TIMEOUT 70#define SA_REWIND_TIMEOUT 2 * 60 71#endif 72#ifndef SA_ERASE_TIMEOUT 73#define SA_ERASE_TIMEOUT 4 * 60 74#endif 75 76#define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4) /* 4 bit unit. 
*/ 77#define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4)) 78 79typedef enum { 80 SA_STATE_NORMAL 81} sa_state; 82 83typedef enum { 84 SA_CCB_BUFFER_IO, 85 SA_CCB_WAITING 86} sa_ccb_types; 87 88#define ccb_type ppriv_field0 89#define ccb_bp ppriv_ptr1 90 91typedef enum { 92 SA_FLAG_OPEN = 0x0001, 93 SA_FLAG_FIXED = 0x0002, 94 SA_FLAG_TAPE_LOCKED = 0x0004, 95 SA_FLAG_TAPE_MOUNTED = 0x0008, 96 SA_FLAG_TAPE_WP = 0x0010, 97 SA_FLAG_TAPE_WRITTEN = 0x0020, 98 SA_FLAG_2FM_AT_EOD = 0x0040, 99 SA_FLAG_EOM_PENDING = 0x0080, 100 SA_FLAG_EIO_PENDING = 0x0100, 101 SA_FLAG_EOF_PENDING = 0x0200, 102 SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING| 103 SA_FLAG_EOF_PENDING), 104 SA_FLAG_INVALID = 0x0400, 105 SA_FLAG_COMP_ENABLED = 0x0800, 106 SA_FLAG_COMP_UNSUPP = 0x1000 107} sa_flags; 108 109typedef enum { 110 SA_MODE_REWIND = 0x00, 111 SA_MODE_NOREWIND = 0x01, 112 SA_MODE_OFFLINE = 0x02 113} sa_mode; 114 115typedef enum { 116 SA_PARAM_NONE = 0x00, 117 SA_PARAM_BLOCKSIZE = 0x01, 118 SA_PARAM_DENSITY = 0x02, 119 SA_PARAM_COMPRESSION = 0x04, 120 SA_PARAM_BUFF_MODE = 0x08, 121 SA_PARAM_NUMBLOCKS = 0x10, 122 SA_PARAM_WP = 0x20, 123 SA_PARAM_SPEED = 0x40, 124 SA_PARAM_ALL = 0x7f 125} sa_params; 126 127typedef enum { 128 SA_QUIRK_NONE = 0x00, 129 SA_QUIRK_NOCOMP = 0x01 130} sa_quirks; 131 132struct sa_softc { 133 sa_state state; 134 sa_flags flags; 135 sa_quirks quirks; 136 struct buf_queue_head buf_queue; 137 struct devstat device_stats; 138 int blk_gran; 139 int blk_mask; 140 int blk_shift; 141 u_int32_t max_blk; 142 u_int32_t min_blk; 143 u_int8_t media_density; 144 u_int32_t media_blksize; 145 u_int32_t media_numblks; 146 u_int32_t comp_algorithm; 147 u_int32_t saved_comp_algorithm; 148 u_int8_t speed; 149 int buffer_mode; 150 int filemarks; 151 union ccb saved_ccb; 152}; 153 154struct sa_quirk_entry { 155 struct scsi_inquiry_pattern inq_pat; 156 sa_quirks quirks; 157}; 158 159static struct sa_quirk_entry sa_quirk_table[] = 160{ 161 { 162 { T_SEQUENTIAL, 
SIP_MEDIA_REMOVABLE, "ARCHIVE", 163 "Python 25601*", "*"}, /*quirks*/SA_QUIRK_NOCOMP 164 } 165}; 166 167static d_open_t saopen; 168static d_read_t saread; 169static d_write_t sawrite; 170static d_close_t saclose; 171static d_strategy_t sastrategy; 172static d_ioctl_t saioctl; 173static periph_init_t sainit; 174static periph_ctor_t saregister; 175static periph_oninv_t saoninvalidate; 176static periph_dtor_t sacleanup; 177static periph_start_t sastart; 178static void saasync(void *callback_arg, u_int32_t code, 179 struct cam_path *path, void *arg); 180static void sadone(struct cam_periph *periph, 181 union ccb *start_ccb); 182static int saerror(union ccb *ccb, u_int32_t cam_flags, 183 u_int32_t sense_flags); 184static int sacheckeod(struct cam_periph *periph); 185static int sagetparams(struct cam_periph *periph, 186 sa_params params_to_get, 187 u_int32_t *blocksize, u_int8_t *density, 188 u_int32_t *numblocks, int *buff_mode, 189 u_int8_t *write_protect, u_int8_t *speed, 190 int *comp_supported, int *comp_enabled, 191 u_int32_t *comp_algorithm, 192 struct scsi_data_compression_page *comp_page); 193static int sasetparams(struct cam_periph *periph, 194 sa_params params_to_set, 195 u_int32_t blocksize, u_int8_t density, 196 u_int32_t comp_algorithm); 197static void saprevent(struct cam_periph *periph, int action); 198static int sarewind(struct cam_periph *periph); 199static int saspace(struct cam_periph *periph, int count, 200 scsi_space_code code); 201static int samount(struct cam_periph *periph); 202static int saretension(struct cam_periph *periph); 203static int sareservereleaseunit(struct cam_periph *periph, 204 int reserve); 205static int saloadunload(struct cam_periph *periph, int load); 206static int saerase(struct cam_periph *periph, int longerase); 207static int sawritefilemarks(struct cam_periph *periph, 208 int nmarks, int setmarks); 209 210static struct periph_driver sadriver = 211{ 212 sainit, "sa", 213 TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation 
*/ 0 214}; 215 216DATA_SET(periphdriver_set, sadriver); 217 218#define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4) /* 4 bit unit. */ 219#define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4)) 220 221#define SAMODE(z) ((minor(z) & 0x03)) 222#define SADENSITY(z) (((minor(z) >> 2) & 0x03)) 223 224/* For 2.2-stable support */ 225#ifndef D_TAPE 226#define D_TAPE 0 227#endif 228 229#define CTLMODE 3 230#define SA_CDEV_MAJOR 14 231#define SA_BDEV_MAJOR 5 232 233static struct cdevsw sa_cdevsw = 234{ 235 /*d_open*/ saopen, 236 /*d_close*/ saclose, 237 /*d_read*/ saread, 238 /*d_write*/ sawrite, 239 /*d_ioctl*/ saioctl, 240 /*d_stop*/ nostop, 241 /*d_reset*/ noreset, 242 /*d_devtotty*/ nodevtotty, 243 /*d_poll*/ seltrue, 244 /*d_mmap*/ nommap, 245 /*d_strategy*/ sastrategy, 246 /*d_name*/ "sa", 247 /*d_spare*/ NULL, 248 /*d_maj*/ -1, 249 /*d_dump*/ nodump, 250 /*d_psize*/ nopsize, 251 /*d_flags*/ D_TAPE, 252 /*d_maxio*/ 0, 253 /*b_maj*/ -1 254}; 255 256static struct extend_array *saperiphs; 257 258static int 259saopen(dev_t dev, int flags, int fmt, struct proc *p) 260{ 261 struct cam_periph *periph; 262 struct sa_softc *softc; 263 int unit; 264 int mode; 265 int density; 266 int error; 267 int s; 268 269 unit = SAUNIT(dev); 270 mode = SAMODE(dev); 271 density = SADENSITY(dev); 272 273 periph = cam_extend_get(saperiphs, unit); 274 if (periph == NULL) 275 return (ENXIO); 276 277 softc = (struct sa_softc *)periph->softc; 278 279 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 280 ("saaopen: dev=0x%x (unit %d , mode %d, density %d)\n", dev, 281 unit, mode, density)); 282 283 s = splsoftcam(); 284 if (softc->flags & SA_FLAG_INVALID) { 285 splx(s); 286 return(ENXIO); 287 } 288 289 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) { 290 splx(s); 291 return (error); /* error code from tsleep */ 292 } 293 294 splx(s); 295 296 if ((softc->flags & SA_FLAG_OPEN) == 0) { 297 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 298 return(ENXIO); 299 300 if ((error = sareservereleaseunit(periph, 
TRUE)) != 0) { 301 cam_periph_unlock(periph); 302 cam_periph_release(periph); 303 return(error); 304 } 305 } 306 307 if (error == 0) { 308 if ((softc->flags & SA_FLAG_OPEN) != 0) { 309 error = EBUSY; 310 } 311 312 if (error == 0) { 313 error = samount(periph); 314 } 315 /* Perform other checking... */ 316 } 317 318 if (error == 0) { 319 saprevent(periph, PR_PREVENT); 320 softc->flags |= SA_FLAG_OPEN; 321 } 322 323 cam_periph_unlock(periph); 324 return (error); 325} 326 327static int 328saclose(dev_t dev, int flag, int fmt, struct proc *p) 329{ 330 struct cam_periph *periph; 331 struct sa_softc *softc; 332 int unit; 333 int mode; 334 int error; 335 336 unit = SAUNIT(dev); 337 mode = SAMODE(dev); 338 periph = cam_extend_get(saperiphs, unit); 339 if (periph == NULL) 340 return (ENXIO); 341 342 softc = (struct sa_softc *)periph->softc; 343 344 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) { 345 return (error); /* error code from tsleep */ 346 } 347 348 sacheckeod(periph); 349 350 saprevent(periph, PR_ALLOW); 351 352 switch (mode) { 353 case SA_MODE_REWIND: 354 sarewind(periph); 355 break; 356 case SA_MODE_OFFLINE: 357 sarewind(periph); 358 saloadunload(periph, /*load*/FALSE); 359 break; 360 case SA_MODE_NOREWIND: 361 default: 362 break; 363 } 364 365 softc->flags &= ~SA_FLAG_OPEN; 366 367 /* release the device */ 368 sareservereleaseunit(periph, FALSE); 369 370 cam_periph_unlock(periph); 371 cam_periph_release(periph); 372 373 return (0); 374} 375 376static int 377saread(dev_t dev, struct uio *uio, int ioflag) 378{ 379 return(physio(sastrategy, NULL, dev, 1, minphys, uio)); 380} 381 382static int 383sawrite(dev_t dev, struct uio *uio, int ioflag) 384{ 385 return(physio(sastrategy, NULL, dev, 0, minphys, uio)); 386} 387 388/* 389 * Actually translate the requested transfer into one the physical driver 390 * can understand. The transfer is described by a buf and will include 391 * only one physical transfer. 
392 */ 393static void 394sastrategy(struct buf *bp) 395{ 396 struct cam_periph *periph; 397 struct sa_softc *softc; 398 u_int unit; 399 int s; 400 401 unit = SAUNIT(bp->b_dev); 402 periph = cam_extend_get(saperiphs, unit); 403 if (periph == NULL) { 404 bp->b_error = ENXIO; 405 goto bad; 406 } 407 softc = (struct sa_softc *)periph->softc; 408 409 s = splsoftcam(); 410 411 if (softc->flags & SA_FLAG_INVALID) { 412 splx(s); 413 bp->b_error = ENXIO; 414 goto bad; 415 } 416 417 splx(s); 418 419 /* 420 * If it's a null transfer, return immediatly 421 */ 422 if (bp->b_bcount == 0) 423 goto done; 424 425 /* valid request? */ 426 if (softc->flags & SA_FLAG_FIXED) { 427 /* 428 * Fixed block device. The byte count must 429 * be a multiple of our block size. 430 */ 431 if (((softc->blk_mask != ~0) 432 && ((bp->b_bcount & softc->blk_mask) != 0)) 433 || ((softc->blk_mask == ~0) 434 && ((bp->b_bcount % softc->min_blk) != 0))) { 435 xpt_print_path(periph->path); 436 printf("Invalid request. Fixed block device " 437 "requests must be a multiple " 438 "of %d bytes\n", softc->min_blk); 439 bp->b_error = EINVAL; 440 goto bad; 441 } 442 } else if ((bp->b_bcount > softc->max_blk) 443 || (bp->b_bcount < softc->min_blk) 444 || (bp->b_bcount & softc->blk_mask) != 0) { 445 446 xpt_print_path(periph->path); 447 printf("Invalid request. Variable block device " 448 "requests must be "); 449 if (softc->blk_mask != 0) { 450 printf("a multiple of %d ", 451 (0x1 << softc->blk_gran)); 452 } 453 printf("between %d and %d bytes\n", 454 softc->min_blk, softc->max_blk); 455 bp->b_error = EINVAL; 456 goto bad; 457 } 458 459 /* 460 * Mask interrupts so that the pack cannot be invalidated until 461 * after we are in the queue. Otherwise, we might not properly 462 * clean up one of the buffers. 
463 */ 464 s = splbio(); 465 466 /* 467 * Place it in the queue of disk activities for this disk 468 */ 469 bufq_insert_tail(&softc->buf_queue, bp); 470 471 splx(s); 472 473 /* 474 * Schedule ourselves for performing the work. 475 */ 476 xpt_schedule(periph, /* XXX priority */1); 477 478 return; 479bad: 480 bp->b_flags |= B_ERROR; 481done: 482 483 /* 484 * Correctly set the buf to indicate a completed xfer 485 */ 486 bp->b_resid = bp->b_bcount; 487 biodone(bp); 488} 489 490static int 491saioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p) 492{ 493 struct cam_periph *periph; 494 struct sa_softc *softc; 495 int unit; 496 int mode; 497 int density; 498 int error; 499 500 unit = SAUNIT(dev); 501 mode = SAMODE(dev); 502 density = SADENSITY(dev); 503 504 periph = cam_extend_get(saperiphs, unit); 505 if (periph == NULL) 506 return (ENXIO); 507 508 softc = (struct sa_softc *)periph->softc; 509 510 /* 511 * Find the device that the user is talking about 512 */ 513 switch (cmd) { 514 case MTIOCGET: 515 { 516 struct mtget *g = (struct mtget *)arg; 517 518 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 519 ("saioctl: MTIOGET\n")); 520 521 bzero(g, sizeof(struct mtget)); 522 g->mt_type = 0x7; /* Ultrix compat *//*? 
*/ 523 g->mt_density = softc->media_density; 524 g->mt_blksiz = softc->media_blksize; 525 if (softc->flags & SA_FLAG_COMP_UNSUPP) { 526 g->mt_comp = MT_COMP_UNSUPP; 527 g->mt_comp0 = MT_COMP_UNSUPP; 528 g->mt_comp1 = MT_COMP_UNSUPP; 529 g->mt_comp2 = MT_COMP_UNSUPP; 530 g->mt_comp3 = MT_COMP_UNSUPP; 531 } else if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) { 532 g->mt_comp = MT_COMP_DISABLED; 533 g->mt_comp0 = MT_COMP_DISABLED; 534 g->mt_comp1 = MT_COMP_DISABLED; 535 g->mt_comp2 = MT_COMP_DISABLED; 536 g->mt_comp3 = MT_COMP_DISABLED; 537 } else { 538 g->mt_comp = softc->comp_algorithm; 539 g->mt_comp0 = softc->comp_algorithm; 540 g->mt_comp1 = softc->comp_algorithm; 541 g->mt_comp2 = softc->comp_algorithm; 542 g->mt_comp3 = softc->comp_algorithm; 543 } 544 g->mt_density0 = softc->media_density; 545 g->mt_density1 = softc->media_density; 546 g->mt_density2 = softc->media_density; 547 g->mt_density3 = softc->media_density; 548 g->mt_blksiz0 = softc->media_blksize; 549 g->mt_blksiz1 = softc->media_blksize; 550 g->mt_blksiz2 = softc->media_blksize; 551 g->mt_blksiz3 = softc->media_blksize; 552 error = 0; 553 break; 554 } 555 case MTIOCTOP: 556 { 557 struct mtop *mt; 558 int count; 559 560 mt = (struct mtop *)arg; 561 562 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 563 ("saioctl: op=0x%x count=0x%x\n", 564 mt->mt_op, mt->mt_count)); 565 566 count = mt->mt_count; 567 switch (mt->mt_op) { 568 case MTWEOF: /* write an end-of-file record */ 569 error = sawritefilemarks(periph, count, 570 /*setmarks*/FALSE); 571 break; 572 case MTBSR: /* backward space record */ 573 case MTFSR: /* forward space record */ 574 case MTBSF: /* backward space file */ 575 case MTFSF: /* forward space file */ 576 case MTEOD: /* space to end of recorded medium */ 577 { 578 int nmarks; 579 scsi_space_code spaceop; 580 581 nmarks = softc->filemarks; 582 error = sacheckeod(periph); 583 nmarks -= softc->filemarks; 584 585 if ((mt->mt_op == MTBSR) || (mt->mt_op == MTBSF)) 586 count = -count; 587 588 if 
((mt->mt_op == MTBSF) || (mt->mt_op == MTFSF)) 589 spaceop = SS_FILEMARKS; 590 else if ((mt->mt_op == MTBSR) || (mt->mt_op == MTFSR)) 591 spaceop = SS_BLOCKS; 592 else { 593 spaceop = SS_EOD; 594 count = 0; 595 nmarks = 0; 596 } 597 598 nmarks = softc->filemarks; 599 error = sacheckeod(periph); 600 nmarks -= softc->filemarks; 601 if (error == 0) 602 error = saspace(periph, count - nmarks, 603 spaceop); 604 break; 605 } 606 case MTREW: /* rewind */ 607 error = sarewind(periph); 608 break; 609 case MTERASE: /* erase */ 610 error = saerase(periph, count); 611 break; 612 case MTRETENS: /* re-tension tape */ 613 error = saretension(periph); 614 break; 615 case MTOFFL: /* rewind and put the drive offline */ 616 /* 617 * Be sure to allow media removal before 618 * attempting the eject. 619 */ 620 saprevent(periph, PR_ALLOW); 621 error = sarewind(periph); 622 623 if (error == 0) 624 error = saloadunload(periph, /*load*/FALSE); 625 else 626 break; 627 628 /* XXX KDM */ 629 softc->flags &= ~SA_FLAG_TAPE_LOCKED; 630 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 631 break; 632 case MTNOP: /* no operation, sets status only */ 633 case MTCACHE: /* enable controller cache */ 634 case MTNOCACHE: /* disable controller cache */ 635 error = 0; 636 break; 637 case MTSETBSIZ: /* Set block size for device */ 638 639 error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count, 640 0, 0); 641 break; 642 case MTSETDNSTY: /* Set density for device and mode */ 643 if (count > UCHAR_MAX) { 644 error = EINVAL; 645 break; 646 } else { 647 error = sasetparams(periph, SA_PARAM_DENSITY, 648 0, count, 0); 649 } 650 break; 651 case MTCOMP: /* enable compression */ 652 /* 653 * Some devices don't support compression, and 654 * don't like it if you ask them for the 655 * compression page. 
656 */ 657 if ((softc->quirks & SA_QUIRK_NOCOMP) 658 || (softc->flags & SA_FLAG_COMP_UNSUPP)) { 659 error = ENODEV; 660 break; 661 } 662 error = sasetparams(periph, SA_PARAM_COMPRESSION, 663 0, 0, count); 664 break; 665 default: 666 error = EINVAL; 667 } 668 break; 669 } 670 case MTIOCIEOT: 671 case MTIOCEEOT: 672 error = 0; 673 break; 674 default: 675 error = cam_periph_ioctl(periph, cmd, arg, saerror); 676 break; 677 } 678 return (error); 679} 680 681static void 682sainit(void) 683{ 684 cam_status status; 685 struct cam_path *path; 686 687 /* 688 * Create our extend array for storing the devices we attach to. 689 */ 690 saperiphs = cam_extend_new(); 691 if (saperiphs == NULL) { 692 printf("sa: Failed to alloc extend array!\n"); 693 return; 694 } 695 696 /* 697 * Install a global async callback. 698 */ 699 status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 700 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 701 702 if (status == CAM_REQ_CMP) { 703 /* Register the async callbacks of interrest */ 704 struct ccb_setasync csa; /* 705 * This is an immediate CCB, 706 * so using the stack is OK 707 */ 708 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 709 csa.ccb_h.func_code = XPT_SASYNC_CB; 710 csa.event_enable = AC_FOUND_DEVICE; 711 csa.callback = saasync; 712 csa.callback_arg = NULL; 713 xpt_action((union ccb *)&csa); 714 status = csa.ccb_h.status; 715 xpt_free_path(path); 716 } 717 718 if (status != CAM_REQ_CMP) { 719 printf("sa: Failed to attach master async callback " 720 "due to status 0x%x!\n", status); 721 } else { 722 /* If we were successfull, register our devsw */ 723 cdevsw_add_generic(SA_BDEV_MAJOR, SA_CDEV_MAJOR, &sa_cdevsw); 724 } 725} 726 727static void 728saoninvalidate(struct cam_periph *periph) 729{ 730 struct sa_softc *softc; 731 struct buf *q_bp; 732 struct ccb_setasync csa; 733 int s; 734 735 softc = (struct sa_softc *)periph->softc; 736 737 /* 738 * De-register any async callbacks. 
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;	/* event mask of 0 deregisters the callback */
	csa.callback = saasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	/* Reject any future opens/strategy calls on this periph. */
	softc->flags |= SA_FLAG_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
		bufq_remove(&softc->buf_queue, q_bp);
		q_bp->b_resid = q_bp->b_bcount;
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bp);
	}
	splx(s);

	xpt_print_path(periph->path);
	printf("lost device\n");

}

/*
 * Final teardown for a sa periph: called once the last reference is
 * released.  Frees the per-unit statistics entry, the unit-number slot
 * in the extend array, and the softc itself.
 */
static void
sacleanup(struct cam_periph *periph)
{
	struct sa_softc *softc;

	softc = (struct sa_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(saperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(softc, M_DEVBUF);
}

/*
 * Async event callback.  AC_FOUND_DEVICE attaches to newly-announced
 * sequential-access devices; AC_LOST_DEVICE invalidates our periph.
 * callback_arg is the registering periph (NULL for the global hook).
 */
static void
saasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		/* Only claim tape (sequential-access) devices. */
		if (cgd->pd_type != T_SEQUENTIAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
812 */ 813 status = cam_periph_alloc(saregister, saoninvalidate, 814 sacleanup, sastart, 815 "sa", CAM_PERIPH_BIO, cgd->ccb_h.path, 816 saasync, AC_FOUND_DEVICE, cgd); 817 818 if (status != CAM_REQ_CMP 819 && status != CAM_REQ_INPROG) 820 printf("saasync: Unable to probe new device " 821 "due to status 0x%x\n", status); 822 break; 823 } 824 case AC_LOST_DEVICE: 825 cam_periph_invalidate(periph); 826 break; 827 case AC_TRANSFER_NEG: 828 case AC_SENT_BDR: 829 case AC_SCSI_AEN: 830 case AC_UNSOL_RESEL: 831 case AC_BUS_RESET: 832 default: 833 break; 834 } 835} 836 837static cam_status 838saregister(struct cam_periph *periph, void *arg) 839{ 840 struct sa_softc *softc; 841 struct ccb_setasync csa; 842 struct ccb_getdev *cgd; 843 caddr_t match; 844 845 cgd = (struct ccb_getdev *)arg; 846 if (periph == NULL) { 847 printf("saregister: periph was NULL!!\n"); 848 return(CAM_REQ_CMP_ERR); 849 } 850 851 if (cgd == NULL) { 852 printf("saregister: no getdev CCB, can't register device\n"); 853 return(CAM_REQ_CMP_ERR); 854 } 855 856 softc = (struct sa_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); 857 858 if (softc == NULL) { 859 printf("saregister: Unable to probe new device. " 860 "Unable to allocate softc\n"); 861 return(CAM_REQ_CMP_ERR); 862 } 863 864 bzero(softc, sizeof(*softc)); 865 softc->state = SA_STATE_NORMAL; 866 bufq_init(&softc->buf_queue); 867 periph->softc = softc; 868 cam_extend_set(saperiphs, periph->unit_number, periph); 869 870 /* 871 * See if this device has any quirks. 872 */ 873 match = cam_quirkmatch((caddr_t)&cgd->inq_data, 874 (caddr_t)sa_quirk_table, 875 sizeof(sa_quirk_table)/sizeof(*sa_quirk_table), 876 sizeof(*sa_quirk_table), scsi_inquiry_match); 877 878 if (match != NULL) 879 softc->quirks = ((struct sa_quirk_entry *)match)->quirks; 880 else 881 softc->quirks = SA_QUIRK_NONE; 882 883 /* 884 * The SA driver supports a blocksize, but we don't know the 885 * blocksize until we sense the media. 
So, set a flag to 886 * indicate that the blocksize is unavailable right now. 887 * We'll clear the flag as soon as we've done a read capacity. 888 */ 889 devstat_add_entry(&softc->device_stats, "sa", 890 periph->unit_number, 0, 891 DEVSTAT_BS_UNAVAILABLE, 892 cgd->pd_type | DEVSTAT_TYPE_IF_SCSI); 893 894 /* 895 * Add an async callback so that we get 896 * notified if this device goes away. 897 */ 898 xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); 899 csa.ccb_h.func_code = XPT_SASYNC_CB; 900 csa.event_enable = AC_LOST_DEVICE; 901 csa.callback = saasync; 902 csa.callback_arg = periph; 903 xpt_action((union ccb *)&csa); 904 905 xpt_announce_periph(periph, NULL); 906 907 return(CAM_REQ_CMP); 908} 909 910static void 911sastart(struct cam_periph *periph, union ccb *start_ccb) 912{ 913 struct sa_softc *softc; 914 915 softc = (struct sa_softc *)periph->softc; 916 917 918 switch (softc->state) { 919 case SA_STATE_NORMAL: 920 { 921 /* Pull a buffer from the queue and get going on it */ 922 struct buf *bp; 923 int s; 924 925 /* 926 * See if there is a buf with work for us to do.. 
927 */ 928 s = splbio(); 929 bp = bufq_first(&softc->buf_queue); 930 if (periph->immediate_priority <= periph->pinfo.priority) { 931 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 932 ("queuing for immediate ccb\n")); 933 start_ccb->ccb_h.ccb_type = SA_CCB_WAITING; 934 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 935 periph_links.sle); 936 periph->immediate_priority = CAM_PRIORITY_NONE; 937 splx(s); 938 wakeup(&periph->ccb_list); 939 } else if (bp == NULL) { 940 splx(s); 941 xpt_release_ccb(start_ccb); 942 } else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) { 943 944 bufq_remove(&softc->buf_queue, bp); 945 bp->b_resid = bp->b_bcount; 946 bp->b_flags |= B_ERROR; 947 if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) { 948 if ((bp->b_flags & B_READ) == 0) 949 bp->b_error = ENOSPC; 950 } 951 if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) { 952 bp->b_error = EIO; 953 } 954 softc->flags &= ~SA_FLAG_ERR_PENDING; 955 bp = bufq_first(&softc->buf_queue); 956 splx(s); 957 biodone(bp); 958 } else { 959 u_int32_t length; 960 961 bufq_remove(&softc->buf_queue, bp); 962 963 if ((softc->flags & SA_FLAG_FIXED) != 0) { 964 if (softc->blk_shift != 0) { 965 length = 966 bp->b_bcount >> softc->blk_shift; 967 } else { 968 length = 969 bp->b_bcount / softc->min_blk; 970 } 971 } else { 972 length = bp->b_bcount; 973 } 974 975 devstat_start_transaction(&softc->device_stats); 976 977 /* 978 * XXX - Perhaps we should... 979 * suppress illegal length indication if we are 980 * running in variable block mode so that we don't 981 * have to request sense every time our requested 982 * block size is larger than the written block. 983 * The residual information from the ccb allows 984 * us to identify this situation anyway. The only 985 * problem with this is that we will not get 986 * information about blocks that are larger than 987 * our read buffer unless we set the block size 988 * in the mode page to something other than 0. 
989 */ 990 scsi_sa_read_write(&start_ccb->csio, 991 /*retries*/4, 992 sadone, 993 MSG_SIMPLE_Q_TAG, 994 bp->b_flags & B_READ, 995 /*SILI*/FALSE, 996 softc->flags & SA_FLAG_FIXED, 997 length, 998 bp->b_data, 999 bp->b_bcount, 1000 SSD_FULL_SIZE, 1001 120 * 60 * 1000); /* 2min */ 1002 start_ccb->ccb_h.ccb_type = SA_CCB_BUFFER_IO; 1003 start_ccb->ccb_h.ccb_bp = bp; 1004 bp = bufq_first(&softc->buf_queue); 1005 splx(s); 1006 1007 xpt_action(start_ccb); 1008 } 1009 1010 if (bp != NULL) { 1011 /* Have more work to do, so ensure we stay scheduled */ 1012 xpt_schedule(periph, /* XXX priority */1); 1013 } 1014 break; 1015 } 1016 } 1017} 1018 1019 1020static void 1021sadone(struct cam_periph *periph, union ccb *done_ccb) 1022{ 1023 struct sa_softc *softc; 1024 struct ccb_scsiio *csio; 1025 1026 softc = (struct sa_softc *)periph->softc; 1027 csio = &done_ccb->csio; 1028 switch (csio->ccb_h.ccb_type) { 1029 case SA_CCB_BUFFER_IO: 1030 { 1031 struct buf *bp; 1032 int error; 1033 1034 bp = (struct buf *)done_ccb->ccb_h.ccb_bp; 1035 error = 0; 1036 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1037 1038 if ((error = saerror(done_ccb, 0, 0)) == ERESTART) { 1039 /* 1040 * A retry was scheuled, so 1041 * just return. 1042 */ 1043 return; 1044 } 1045 } 1046 1047 if (error == EIO) { 1048 int s; 1049 struct buf *q_bp; 1050 1051 /* 1052 * Catastrophic error. Mark our pack as invalid, 1053 * return all queued I/O with EIO, and unfreeze 1054 * our queue so that future transactions that 1055 * attempt to fix this problem can get to the 1056 * device. 
1057 * 1058 */ 1059 1060 s = splbio(); 1061 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 1062 1063 while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) { 1064 bufq_remove(&softc->buf_queue, q_bp); 1065 q_bp->b_resid = q_bp->b_bcount; 1066 q_bp->b_error = EIO; 1067 q_bp->b_flags |= B_ERROR; 1068 biodone(q_bp); 1069 } 1070 splx(s); 1071 } 1072 if (error != 0) { 1073 bp->b_resid = bp->b_bcount; 1074 bp->b_error = error; 1075 bp->b_flags |= B_ERROR; 1076 cam_release_devq(done_ccb->ccb_h.path, 1077 /*relsim_flags*/0, 1078 /*reduction*/0, 1079 /*timeout*/0, 1080 /*getcount_only*/0); 1081 } else { 1082 bp->b_resid = csio->resid; 1083 bp->b_error = 0; 1084 if (csio->resid != 0) { 1085 bp->b_flags |= B_ERROR; 1086 } 1087 if ((bp->b_flags & B_READ) == 0) { 1088 softc->flags |= SA_FLAG_TAPE_WRITTEN; 1089 softc->filemarks = 0; 1090 } 1091 } 1092 1093 devstat_end_transaction(&softc->device_stats, 1094 bp->b_bcount - bp->b_resid, 1095 done_ccb->csio.tag_action & 0xf, 1096 (bp->b_flags & B_READ) ? DEVSTAT_READ 1097 : DEVSTAT_WRITE); 1098 biodone(bp); 1099 break; 1100 } 1101 case SA_CCB_WAITING: 1102 { 1103 /* Caller will release the CCB */ 1104 wakeup(&done_ccb->ccb_h.cbfcnp); 1105 return; 1106 } 1107 } 1108 xpt_release_ccb(done_ccb); 1109} 1110 1111static int 1112samount(struct cam_periph *periph) 1113{ 1114 struct sa_softc *softc; 1115 union ccb *ccb; 1116 struct ccb_scsiio *csio; 1117 int error; 1118 1119 softc = (struct sa_softc *)periph->softc; 1120 ccb = cam_periph_getccb(periph, /* priority */1); 1121 csio = &ccb->csio; 1122 error = 0; 1123 1124 /* 1125 * Determine if something has happend since the last 1126 * open/mount that would invalidate a mount. This 1127 * will also eat any pending UAs. 
1128 */ 1129 scsi_test_unit_ready(csio, 1130 /*retries*/1, 1131 sadone, 1132 MSG_SIMPLE_Q_TAG, 1133 SSD_FULL_SIZE, 1134 /*timeout*/5000); 1135 1136 cam_periph_runccb(ccb, /*error handler*/NULL, /*cam_flags*/0, 1137 /*sense_flags*/0, &softc->device_stats); 1138 1139 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1140 cam_release_devq(ccb->ccb_h.path, 1141 /*relsim_flags*/0, 1142 /*reduction*/0, 1143 /*timeout*/0, 1144 /*getcount_only*/0); 1145 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 1146 } 1147 1148 if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { 1149 struct scsi_read_block_limits_data *rblim; 1150 int comp_enabled, comp_supported; 1151 u_int8_t write_protect; 1152 1153 /* 1154 * Clear out old state. 1155 */ 1156 softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN| 1157 SA_FLAG_ERR_PENDING|SA_FLAG_COMP_ENABLED| 1158 SA_FLAG_COMP_UNSUPP); 1159 softc->filemarks = 0; 1160 1161 /* 1162 * First off, determine block limits. 1163 */ 1164 rblim = (struct scsi_read_block_limits_data *) 1165 malloc(sizeof(*rblim), M_TEMP, M_WAITOK); 1166 1167 scsi_read_block_limits(csio, 1168 /*retries*/1, 1169 sadone, 1170 MSG_SIMPLE_Q_TAG, 1171 rblim, 1172 SSD_FULL_SIZE, 1173 /*timeout*/5000); 1174 1175 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1176 /*sense_flags*/SF_RETRY_UA, 1177 &softc->device_stats); 1178 1179 xpt_release_ccb(ccb); 1180 1181 if (error != 0) 1182 goto exit; 1183 1184 softc->blk_gran = RBL_GRAN(rblim); 1185 softc->max_blk = scsi_3btoul(rblim->maximum); 1186 softc->min_blk = scsi_2btoul(rblim->minimum); 1187 if (softc->max_blk == softc->min_blk) { 1188 softc->flags |= SA_FLAG_FIXED; 1189 if (powerof2(softc->min_blk)) { 1190 softc->blk_mask = softc->min_blk - 1; 1191 softc->blk_shift = 0; 1192 softc->blk_shift = ffs(softc->min_blk) - 1; 1193 } else { 1194 softc->blk_mask = ~0; 1195 softc->blk_shift = 0; 1196 } 1197 } else { 1198 /* 1199 * SCSI-III spec allows 0 1200 * to mean "unspecified" 1201 */ 1202 if (softc->max_blk == 0) { 1203 softc->max_blk = ~0; 
1204 } 1205 softc->blk_shift = 0; 1206 if (softc->blk_gran != 0) { 1207 softc->blk_mask = softc->blk_gran - 1; 1208 } else { 1209 softc->blk_mask = 0; 1210 } 1211 } 1212 1213 /* 1214 * Next, perform a mode sense to determine 1215 * current density, blocksize, compression etc. 1216 */ 1217 error = sagetparams(periph, SA_PARAM_ALL, 1218 &softc->media_blksize, 1219 &softc->media_density, 1220 &softc->media_numblks, 1221 &softc->buffer_mode, &write_protect, 1222 &softc->speed, &comp_supported, 1223 &comp_enabled, &softc->comp_algorithm, 1224 NULL); 1225 1226 if (error != 0) 1227 goto exit; 1228 1229 if (write_protect) 1230 softc->flags |= SA_FLAG_TAPE_WP; 1231 1232 if (comp_supported) { 1233 if (comp_enabled) { 1234 softc->flags |= SA_FLAG_COMP_ENABLED; 1235 1236 if (softc->saved_comp_algorithm == 0) 1237 softc->saved_comp_algorithm = 1238 softc->comp_algorithm; 1239 } 1240 } else 1241 softc->flags |= SA_FLAG_COMP_UNSUPP; 1242 1243 if (softc->buffer_mode != SMH_SA_BUF_MODE_NOBUF) 1244 goto exit; 1245 1246 error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0); 1247 1248 if (error == 0) 1249 softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF; 1250exit: 1251 if (rblim != NULL) 1252 free(rblim, M_TEMP); 1253 1254 if (error != 0) { 1255 cam_release_devq(ccb->ccb_h.path, 1256 /*relsim_flags*/0, 1257 /*reduction*/0, 1258 /*timeout*/0, 1259 /*getcount_only*/0); 1260 } 1261 } else 1262 xpt_release_ccb(ccb); 1263 1264 return (error); 1265} 1266 1267static int 1268sacheckeod(struct cam_periph *periph) 1269{ 1270 int error; 1271 int markswanted; 1272 struct sa_softc *softc; 1273 1274 softc = (struct sa_softc *)periph->softc; 1275 markswanted = 0; 1276 1277 if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) { 1278 markswanted++; 1279 1280 if ((softc->flags & SA_FLAG_2FM_AT_EOD) != 0) 1281 markswanted++; 1282 } 1283 1284 if (softc->filemarks < markswanted) { 1285 markswanted -= softc->filemarks; 1286 error = sawritefilemarks(periph, markswanted, 1287 /*setmarks*/FALSE); 1288 } else { 1289 
error = 0; 1290 } 1291 return (error); 1292} 1293 1294static int 1295saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1296{ 1297 struct cam_periph *periph; 1298 struct sa_softc *softc; 1299 struct ccb_scsiio *csio; 1300 struct scsi_sense_data *sense; 1301 int error_code, sense_key, asc, ascq; 1302 int error; 1303 1304 periph = xpt_path_periph(ccb->ccb_h.path); 1305 softc = (struct sa_softc *)periph->softc; 1306 csio = &ccb->csio; 1307 sense = &csio->sense_data; 1308 scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq); 1309 error = 0; 1310 1311 if (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) 1312 && ((sense->flags & (SSD_EOM|SSD_FILEMARK|SSD_ILI)) != 0) 1313 && ((sense_key == SSD_KEY_NO_SENSE) 1314 || (sense_key == SSD_KEY_BLANK_CHECK))) { 1315 u_int32_t info; 1316 u_int32_t resid; 1317 int defer_action; 1318 1319 /* 1320 * Filter out some sense codes of interest. 1321 */ 1322 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) { 1323 info = scsi_4btoul(sense->info); 1324 resid = info; 1325 if ((softc->flags & SA_FLAG_FIXED) != 0) 1326 resid *= softc->media_blksize; 1327 } else { 1328 resid = csio->dxfer_len; 1329 info = resid; 1330 if ((softc->flags & SA_FLAG_FIXED) != 0) 1331 info /= softc->media_blksize; 1332 } 1333 if ((resid > 0 && resid < csio->dxfer_len) 1334 && (softc->flags & SA_FLAG_FIXED) != 0) 1335 defer_action = TRUE; 1336 else 1337 defer_action = FALSE; 1338 1339 if ((sense->flags & SSD_EOM) != 0 1340 || (sense_key == 0x8 /* BLANK CHECK*/)) { 1341 csio->resid = resid; 1342 if (defer_action) { 1343 softc->flags |= SA_FLAG_EOM_PENDING; 1344 } else { 1345 if (csio->cdb_io.cdb_bytes[0] == SA_WRITE) 1346 error = ENOSPC; 1347 } 1348 } 1349 if ((sense->flags & SSD_FILEMARK) != 0) { 1350 csio->resid = resid; 1351 if (defer_action) 1352 softc->flags |= SA_FLAG_EOF_PENDING; 1353 } 1354 if (sense->flags & SSD_ILI) { 1355 if (info < 0) { 1356 /* 1357 * The record was too big. 
1358 */ 1359 xpt_print_path(csio->ccb_h.path); 1360 printf("%d-byte tape record bigger " 1361 "than suplied read buffer\n", 1362 csio->dxfer_len - info); 1363 csio->resid = csio->dxfer_len; 1364 error = EIO; 1365 } else { 1366 csio->resid = resid; 1367 if ((softc->flags & SA_FLAG_FIXED) != 0) { 1368 if (defer_action) 1369 softc->flags |= 1370 SA_FLAG_EIO_PENDING; 1371 else 1372 error = EIO; 1373 } 1374 } 1375 } 1376 } 1377 if (error == 0) 1378 error = cam_periph_error(ccb, cam_flags, sense_flags, 1379 &softc->saved_ccb); 1380 1381 return (error); 1382} 1383 1384static int 1385sagetparams(struct cam_periph *periph, sa_params params_to_get, 1386 u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, 1387 int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, 1388 int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, 1389 struct scsi_data_compression_page *comp_page) 1390{ 1391 union ccb *ccb; 1392 void *mode_buffer; 1393 struct scsi_mode_header_6 *mode_hdr; 1394 struct scsi_mode_blk_desc *mode_blk; 1395 struct scsi_data_compression_page *ncomp_page; 1396 int mode_buffer_len; 1397 struct sa_softc *softc; 1398 int error; 1399 cam_status status; 1400 1401 softc = (struct sa_softc *)periph->softc; 1402 1403 ccb = cam_periph_getccb(periph, /*priority*/ 1); 1404 1405retry: 1406 mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); 1407 1408 if (params_to_get & SA_PARAM_COMPRESSION) { 1409 if (softc->quirks & SA_QUIRK_NOCOMP) { 1410 *comp_supported = FALSE; 1411 params_to_get &= ~SA_PARAM_COMPRESSION; 1412 } else 1413 mode_buffer_len += 1414 sizeof(struct scsi_data_compression_page); 1415 } 1416 1417 mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK); 1418 1419 bzero(mode_buffer, mode_buffer_len); 1420 1421 mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; 1422 mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; 1423 1424 if (params_to_get & SA_PARAM_COMPRESSION) 1425 ncomp_page = (struct scsi_data_compression_page *)&mode_blk[1]; 1426 
else 1427 ncomp_page = NULL; 1428 1429 scsi_mode_sense(&ccb->csio, 1430 /*retries*/ 1, 1431 /*cbfcnp*/ sadone, 1432 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1433 /*dbd*/ FALSE, 1434 /*page_code*/ SMS_PAGE_CTRL_CURRENT, 1435 /*page*/ (params_to_get & SA_PARAM_COMPRESSION) ? 1436 SA_DATA_COMPRESSION_PAGE : 1437 SMS_VENDOR_SPECIFIC_PAGE, 1438 /*param_buf*/ mode_buffer, 1439 /*param_len*/ mode_buffer_len, 1440 /*sense_len*/ SSD_FULL_SIZE, 1441 /*timeout*/ 5000); 1442 1443 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1444 /*sense_flags*/SF_NO_PRINT, 1445 &softc->device_stats); 1446 1447 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1448 cam_release_devq(ccb->ccb_h.path, 1449 /* relsim_flags */0, 1450 /* opening_reduction */0, 1451 /* timeout */0, 1452 /* getcount_only */ FALSE); 1453 1454 status = ccb->ccb_h.status & CAM_STATUS_MASK; 1455 1456 if (error == EINVAL 1457 && (params_to_get & SA_PARAM_COMPRESSION) != 0) { 1458 /* 1459 * Most likely doesn't support the compression 1460 * page. Remeber this for the future and attempt 1461 * the request without asking for compression info. 1462 */ 1463 softc->quirks |= SA_QUIRK_NOCOMP; 1464 free(mode_buffer, M_TEMP); 1465 goto retry; 1466 } else if (error == 0) { 1467 struct scsi_data_compression_page *temp_comp_page; 1468 1469 temp_comp_page = NULL; 1470 1471 /* 1472 * If the user only wants the compression information, and 1473 * the device doesn't send back the block descriptor, it's 1474 * no big deal. If the user wants more than just 1475 * compression, though, and the device doesn't pass back the 1476 * block descriptor, we need to send another mode sense to 1477 * get the block descriptor. 1478 */ 1479 if ((mode_hdr->blk_desc_len == 0) 1480 && (params_to_get & SA_PARAM_COMPRESSION) 1481 && ((params_to_get & ~(SA_PARAM_COMPRESSION)) != 0)) { 1482 1483 /* 1484 * Decrease the mode buffer length by the size of 1485 * the compression page, to make sure the data 1486 * there doesn't get overwritten. 
1487 */ 1488 mode_buffer_len -= sizeof(*ncomp_page); 1489 1490 /* 1491 * Now move the compression page that we presumably 1492 * got back down the memory chunk a little bit so 1493 * it doesn't get spammed. 1494 */ 1495 temp_comp_page = 1496 (struct scsi_data_compression_page *)&mode_hdr[1]; 1497 bcopy(temp_comp_page, ncomp_page, sizeof(*ncomp_page)); 1498 1499 /* 1500 * Now, we issue another mode sense and just ask 1501 * for the block descriptor, etc. 1502 */ 1503 scsi_mode_sense(&ccb->csio, 1504 /*retries*/ 1, 1505 /*cbfcnp*/ sadone, 1506 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1507 /*dbd*/ FALSE, 1508 /*page_code*/ SMS_PAGE_CTRL_CURRENT, 1509 /*page*/ SMS_VENDOR_SPECIFIC_PAGE, 1510 /*param_buf*/ mode_buffer, 1511 /*param_len*/ mode_buffer_len, 1512 /*sense_len*/ SSD_FULL_SIZE, 1513 /*timeout*/ 5000); 1514 1515 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1516 /*sense_flags*/ 0, 1517 &softc->device_stats); 1518 1519 if (error != 0) 1520 goto sagetparamsexit; 1521 1522 } 1523 1524 if (params_to_get & SA_PARAM_BLOCKSIZE) 1525 *blocksize = scsi_3btoul(mode_blk->blklen); 1526 1527 if (params_to_get & SA_PARAM_NUMBLOCKS) 1528 *numblocks = scsi_3btoul(mode_blk->nblocks); 1529 1530 if (params_to_get & SA_PARAM_BUFF_MODE) 1531 *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK; 1532 1533 if (params_to_get & SA_PARAM_DENSITY) 1534 *density = mode_blk->density; 1535 1536 if (params_to_get & SA_PARAM_WP) 1537 *write_protect = (mode_hdr->dev_spec & SMH_SA_WP) ? 1538 TRUE : FALSE; 1539 if (params_to_get & SA_PARAM_SPEED) 1540 *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK; 1541 1542 if (params_to_get & SA_PARAM_COMPRESSION) { 1543 *comp_supported =(ncomp_page->dce_and_dcc & SA_DCP_DCC)? 1544 TRUE : FALSE; 1545 *comp_enabled = (ncomp_page->dce_and_dcc & SA_DCP_DCE)? 
1546 TRUE : FALSE; 1547 *comp_algorithm = 1548 scsi_4btoul(ncomp_page->comp_algorithm); 1549 if (comp_page != NULL) 1550 bcopy(ncomp_page, comp_page,sizeof(*comp_page)); 1551 } 1552 1553 } else if (status == CAM_SCSI_STATUS_ERROR) { 1554 /* Tell the user about the fatal error. */ 1555 scsi_sense_print(&ccb->csio); 1556 } 1557 1558sagetparamsexit: 1559 1560 xpt_release_ccb(ccb); 1561 free(mode_buffer, M_TEMP); 1562 return(error); 1563} 1564 1565/* 1566 * The purpose of this function is to set one of four different parameters 1567 * for a tape drive: 1568 * - blocksize 1569 * - density 1570 * - compression / compression algorithm 1571 * - buffering mode 1572 * 1573 * The assumption is that this will be called from saioctl(), and therefore 1574 * from a process context. Thus the waiting malloc calls below. If that 1575 * assumption ever changes, the malloc calls should be changed to be 1576 * NOWAIT mallocs. 1577 * 1578 * Any or all of the four parameters may be set when this function is 1579 * called. It should handle setting more than one parameter at once. 
1580 */ 1581static int 1582sasetparams(struct cam_periph *periph, sa_params params_to_set, 1583 u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm) 1584{ 1585 struct sa_softc *softc; 1586 u_int32_t current_blocksize; 1587 u_int32_t current_comp_algorithm; 1588 u_int8_t current_density; 1589 u_int8_t current_speed; 1590 int comp_enabled, comp_supported; 1591 void *mode_buffer; 1592 int mode_buffer_len; 1593 struct scsi_mode_header_6 *mode_hdr; 1594 struct scsi_mode_blk_desc *mode_blk; 1595 struct scsi_data_compression_page *comp_page; 1596 struct scsi_data_compression_page *current_comp_page; 1597 int buff_mode; 1598 union ccb *ccb; 1599 int error; 1600 1601 softc = (struct sa_softc *)periph->softc; 1602 1603 /* silence the compiler */ 1604 ccb = NULL; 1605 1606 current_comp_page = malloc(sizeof(*current_comp_page),M_TEMP, M_WAITOK); 1607 1608 /* 1609 * Since it doesn't make sense to set the number of blocks, or 1610 * write protection, we won't try to get the current value. We 1611 * always want to get the blocksize, so we can set it back to the 1612 * proper value. 
1613 */ 1614 error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE | 1615 SA_PARAM_SPEED, ¤t_blocksize, 1616 ¤t_density, NULL, &buff_mode, NULL, 1617 ¤t_speed, &comp_supported, &comp_enabled, 1618 ¤t_comp_algorithm, current_comp_page); 1619 1620 if (error != 0) { 1621 free(current_comp_page, M_TEMP); 1622 return(error); 1623 } 1624 1625 mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); 1626 if (params_to_set & SA_PARAM_COMPRESSION) 1627 mode_buffer_len += sizeof(struct scsi_data_compression_page); 1628 1629 mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK); 1630 1631 bzero(mode_buffer, mode_buffer_len); 1632 1633 mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; 1634 mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; 1635 1636 if (params_to_set & SA_PARAM_COMPRESSION) { 1637 comp_page = (struct scsi_data_compression_page *)&mode_blk[1]; 1638 bcopy(current_comp_page, comp_page, sizeof(*comp_page)); 1639 } else 1640 comp_page = NULL; 1641 1642 /* 1643 * If the caller wants us to set the blocksize, use the one they 1644 * pass in. Otherwise, use the blocksize we got back from the 1645 * mode select above. 1646 */ 1647 if (params_to_set & SA_PARAM_BLOCKSIZE) 1648 scsi_ulto3b(blocksize, mode_blk->blklen); 1649 else 1650 scsi_ulto3b(current_blocksize, mode_blk->blklen); 1651 1652 /* 1653 * 0x7f means "same as before" 1654 */ 1655 if (params_to_set & SA_PARAM_DENSITY) 1656 mode_blk->density = density; 1657 else 1658 mode_blk->density = 0x7f; 1659 1660 /* 1661 * For mode selects, these two fields must be zero. 
1662 */ 1663 mode_hdr->data_length = 0; 1664 mode_hdr->medium_type = 0; 1665 1666 /* set the speed to the current value */ 1667 mode_hdr->dev_spec = current_speed; 1668 1669 /* set single-initiator buffering mode */ 1670 mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; 1671 1672 mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc); 1673 1674 /* 1675 * First, if the user wants us to set the compression algorithm or 1676 * just turn compression on, check to make sure that this drive 1677 * supports compression. 1678 */ 1679 if ((params_to_set & SA_PARAM_COMPRESSION) 1680 && (current_comp_page->dce_and_dcc & SA_DCP_DCC)) { 1681 1682 /* 1683 * If the compression algorithm is 0, disable compression. 1684 * If the compression algorithm is non-zero, enable 1685 * compression and set the compression type to the 1686 * specified compression algorithm, unless the algorithm is 1687 * MT_COMP_ENABLE. In that case, we look at the 1688 * compression algorithm that is currently set and if it is 1689 * non-zero, we leave it as-is. If it is zero, and we have 1690 * saved a compression algorithm from a time when 1691 * compression was enabled before, set the compression to 1692 * the saved value. 1693 */ 1694 if (comp_algorithm == 0) { 1695 /* disable compression */ 1696 comp_page->dce_and_dcc &= ~SA_DCP_DCE; 1697 } else { 1698 /* enable compression */ 1699 comp_page->dce_and_dcc |= SA_DCP_DCE; 1700 1701 /* enable decompression */ 1702 comp_page->dde_and_red |= SA_DCP_DDE; 1703 1704 if (comp_algorithm != MT_COMP_ENABLE) { 1705 /* set the compression algorithm */ 1706 scsi_ulto4b(comp_algorithm, 1707 comp_page->comp_algorithm); 1708 1709 } else if ((scsi_4btoul(comp_page->comp_algorithm) == 0) 1710 && (softc->saved_comp_algorithm != 0)) { 1711 scsi_ulto4b(softc->saved_comp_algorithm, 1712 comp_page->comp_algorithm); 1713 } 1714 } 1715 } else if (params_to_set & SA_PARAM_COMPRESSION) { 1716 /* 1717 * The drive doesn't support compression, so turn off the 1718 * set compression bit. 
1719 */ 1720 params_to_set &= ~SA_PARAM_COMPRESSION; 1721 1722 /* 1723 * Should probably do something other than a printf...like 1724 * set a flag in the softc saying that this drive doesn't 1725 * support compression. 1726 */ 1727 xpt_print_path(periph->path); 1728 printf("sasetparams: device does not support compression\n"); 1729 1730 /* 1731 * If that was the only thing the user wanted us to set, 1732 * clean up allocated resources and return with 'operation 1733 * not supported'. 1734 */ 1735 if (params_to_set == SA_PARAM_NONE) { 1736 free(mode_buffer, M_TEMP); 1737 return(ENODEV); 1738 } 1739 1740 /* 1741 * That wasn't the only thing the user wanted us to set. 1742 * So, decrease the stated mode buffer length by the size 1743 * of the compression mode page. 1744 */ 1745 mode_buffer_len -= sizeof(*comp_page); 1746 } 1747 1748 ccb = cam_periph_getccb(periph, /*priority*/ 1); 1749 1750 scsi_mode_select(&ccb->csio, 1751 /*retries*/1, 1752 /*cbfcnp*/ sadone, 1753 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1754 /*scsi_page_fmt*/(params_to_set & SA_PARAM_COMPRESSION)? 1755 TRUE : FALSE, 1756 /*save_pages*/ FALSE, 1757 /*param_buf*/ mode_buffer, 1758 /*param_len*/ mode_buffer_len, 1759 /*sense_len*/ SSD_FULL_SIZE, 1760 /*timeout*/ 5000); 1761 1762 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1763 /*sense_flags*/ 0, &softc->device_stats); 1764 1765 if (error == 0) { 1766 xpt_release_ccb(ccb); 1767 } else { 1768 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1769 cam_release_devq(ccb->ccb_h.path, 1770 /*relsim_flags*/0, 1771 /*reduction*/0, 1772 /*timeout*/0, 1773 /*getcount_only*/0); 1774 /* 1775 * If we were setting the blocksize, and that failed, we 1776 * want to set it to its original value. If we weren't 1777 * setting the blocksize, we don't want to change it. 1778 */ 1779 scsi_ulto3b(current_blocksize, mode_blk->blklen); 1780 1781 /* 1782 * 0x7f means "same as before". 
1783 */ 1784 if (params_to_set & SA_PARAM_DENSITY) 1785 mode_blk->density = current_density; 1786 else 1787 mode_blk->density = 0x7f; 1788 1789 if (params_to_set & SA_PARAM_COMPRESSION) 1790 bcopy(current_comp_page, comp_page, 1791 sizeof(struct scsi_data_compression_page)); 1792 1793 /* 1794 * The retry count is the only CCB field that might have been 1795 * changed that we care about, so reset it back to 1. 1796 */ 1797 ccb->ccb_h.retry_count = 1; 1798 cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1799 /*sense_flags*/ 0, &softc->device_stats); 1800 1801 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1802 cam_release_devq(ccb->ccb_h.path, 1803 /*relsim_flags*/0, 1804 /*reduction*/0, 1805 /*timeout*/0, 1806 /*getcount_only*/0); 1807 1808 xpt_release_ccb(ccb); 1809 } 1810 1811 if (params_to_set & SA_PARAM_COMPRESSION) 1812 free(current_comp_page, M_TEMP); 1813 1814 free(mode_buffer, M_TEMP); 1815 return(error); 1816} 1817 1818static void 1819saprevent(struct cam_periph *periph, int action) 1820{ 1821 struct sa_softc *softc; 1822 union ccb *ccb; 1823 int error; 1824 1825 softc = (struct sa_softc *)periph->softc; 1826 1827 if (((action == PR_ALLOW) 1828 && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0) 1829 || ((action == PR_PREVENT) 1830 && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0)) { 1831 return; 1832 } 1833 1834 ccb = cam_periph_getccb(periph, /*priority*/1); 1835 1836 scsi_prevent(&ccb->csio, 1837 /*retries*/0, 1838 /*cbcfp*/sadone, 1839 MSG_SIMPLE_Q_TAG, 1840 action, 1841 SSD_FULL_SIZE, 1842 60000); 1843 1844 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1845 /*sense_flags*/0, &softc->device_stats); 1846 1847 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1848 cam_release_devq(ccb->ccb_h.path, 1849 /*relsim_flags*/0, 1850 /*reduction*/0, 1851 /*timeout*/0, 1852 /*getcount_only*/0); 1853 1854 1855 if (error == 0) { 1856 if (action == PR_ALLOW) 1857 softc->flags &= ~SA_FLAG_TAPE_LOCKED; 1858 else 1859 softc->flags |= SA_FLAG_TAPE_LOCKED; 1860 } 1861 1862 
xpt_release_ccb(ccb); 1863} 1864 1865static int 1866sarewind(struct cam_periph *periph) 1867{ 1868 union ccb *ccb; 1869 struct sa_softc *softc; 1870 int error; 1871 1872 softc = (struct sa_softc *)periph->softc; 1873 1874 ccb = cam_periph_getccb(periph, /*priority*/1); 1875 1876 scsi_rewind(&ccb->csio, 1877 /*retries*/1, 1878 /*cbcfp*/sadone, 1879 MSG_SIMPLE_Q_TAG, 1880 /*immediate*/FALSE, 1881 SSD_FULL_SIZE, 1882 (SA_REWIND_TIMEOUT) * 60 * 1000); 1883 1884 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1885 /*sense_flags*/0, &softc->device_stats); 1886 1887 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1888 cam_release_devq(ccb->ccb_h.path, 1889 /*relsim_flags*/0, 1890 /*reduction*/0, 1891 /*timeout*/0, 1892 /*getcount_only*/0); 1893 1894 xpt_release_ccb(ccb); 1895 1896 return (error); 1897} 1898 1899static int 1900saspace(struct cam_periph *periph, int count, scsi_space_code code) 1901{ 1902 union ccb *ccb; 1903 struct sa_softc *softc; 1904 int error; 1905 1906 softc = (struct sa_softc *)periph->softc; 1907 1908 ccb = cam_periph_getccb(periph, /*priority*/1); 1909 1910 scsi_space(&ccb->csio, 1911 /*retries*/1, 1912 /*cbcfp*/sadone, 1913 MSG_SIMPLE_Q_TAG, 1914 code, count, 1915 SSD_FULL_SIZE, 1916 (SA_SPACE_TIMEOUT) * 60 * 1000); 1917 1918 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1919 /*sense_flags*/0, &softc->device_stats); 1920 1921 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1922 cam_release_devq(ccb->ccb_h.path, 1923 /*relsim_flags*/0, 1924 /*reduction*/0, 1925 /*timeout*/0, 1926 /*getcount_only*/0); 1927 1928 xpt_release_ccb(ccb); 1929 1930 return (error); 1931} 1932 1933static int 1934sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks) 1935{ 1936 union ccb *ccb; 1937 struct sa_softc *softc; 1938 int error; 1939 1940 softc = (struct sa_softc *)periph->softc; 1941 1942 ccb = cam_periph_getccb(periph, /*priority*/1); 1943 1944 scsi_write_filemarks(&ccb->csio, 1945 /*retries*/1, 1946 /*cbcfp*/sadone, 1947 
MSG_SIMPLE_Q_TAG, 1948 /*immediate*/FALSE, 1949 setmarks, 1950 nmarks, 1951 SSD_FULL_SIZE, 1952 60000); 1953 1954 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1955 /*sense_flags*/0, &softc->device_stats); 1956 1957 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1958 cam_release_devq(ccb->ccb_h.path, 1959 /*relsim_flags*/0, 1960 /*reduction*/0, 1961 /*timeout*/0, 1962 /*getcount_only*/0); 1963 1964 if (error == 0) { 1965 struct sa_softc *softc; 1966 1967 softc = (struct sa_softc *)periph->softc; 1968 softc->filemarks += nmarks; 1969 } 1970 1971 xpt_release_ccb(ccb); 1972 1973 return (error); 1974} 1975 1976static int 1977saretension(struct cam_periph *periph) 1978{ 1979 union ccb *ccb; 1980 struct sa_softc *softc; 1981 int error; 1982 1983 softc = (struct sa_softc *)periph->softc; 1984 1985 ccb = cam_periph_getccb(periph, /*priority*/1); 1986 1987 scsi_load_unload(&ccb->csio, 1988 /*retries*/ 1, 1989 /*cbfcnp*/ sadone, 1990 MSG_SIMPLE_Q_TAG, 1991 /*immediate*/ FALSE, 1992 /*eot*/ FALSE, 1993 /*reten*/ TRUE, 1994 /*load*/ TRUE, 1995 SSD_FULL_SIZE, 1996 60000); 1997 1998 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1999 /*sense_flags*/0, &softc->device_stats); 2000 2001 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2002 cam_release_devq(ccb->ccb_h.path, 2003 /*relsim_flags*/0, 2004 /*reduction*/0, 2005 /*timeout*/0, 2006 /*getcount_only*/0); 2007 2008 xpt_release_ccb(ccb); 2009 2010 return(error); 2011} 2012 2013static int 2014sareservereleaseunit(struct cam_periph *periph, int reserve) 2015{ 2016 union ccb *ccb; 2017 struct sa_softc *softc; 2018 int error; 2019 2020 softc = (struct sa_softc *)periph->softc; 2021 2022 ccb = cam_periph_getccb(periph, /*priority*/ 1); 2023 2024 scsi_reserve_release_unit(&ccb->csio, 2025 /*retries*/ 1, 2026 /*cbfcnp*/ sadone, 2027 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2028 /*third_party*/ FALSE, 2029 /*third_party_id*/ 0, 2030 /*sense_len*/ SSD_FULL_SIZE, 2031 /*timeout*/ 5000, 2032 reserve); 2033 2034 /* 2035 * We set 
SF_RETRY_UA, since this is often the first command run 2036 * when a tape device is opened, and there may be a unit attention 2037 * condition pending. 2038 */ 2039 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2040 /*sense_flags*/SF_RETRY_UA, 2041 &softc->device_stats); 2042 2043 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2044 cam_release_devq(ccb->ccb_h.path, 2045 /*relsim_flags*/0, 2046 /*reduction*/0, 2047 /*timeout*/0, 2048 /*getcount_only*/0); 2049 2050 xpt_release_ccb(ccb); 2051 2052 return (error); 2053} 2054 2055static int 2056saloadunload(struct cam_periph *periph, int load) 2057{ 2058 union ccb *ccb; 2059 struct sa_softc *softc; 2060 int error; 2061 2062 softc = (struct sa_softc *)periph->softc; 2063 2064 ccb = cam_periph_getccb(periph, /*priority*/1); 2065 2066 scsi_load_unload(&ccb->csio, 2067 /*retries*/1, 2068 /*cbfcnp*/sadone, 2069 MSG_SIMPLE_Q_TAG, 2070 /*immediate*/FALSE, 2071 /*eot*/FALSE, 2072 /*reten*/FALSE, 2073 load, 2074 SSD_FULL_SIZE, 2075 60000); 2076 2077 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2078 /*sense_flags*/0, &softc->device_stats); 2079 2080 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2081 cam_release_devq(ccb->ccb_h.path, 2082 /*relsim_flags*/0, 2083 /*reduction*/0, 2084 /*timeout*/0, 2085 /*getcount_only*/0); 2086 2087 xpt_release_ccb(ccb); 2088 2089 return (error); 2090} 2091 2092static int 2093saerase(struct cam_periph *periph, int longerase) 2094{ 2095 2096 union ccb *ccb; 2097 struct sa_softc *softc; 2098 int error; 2099 2100 softc = (struct sa_softc *)periph->softc; 2101 2102 ccb = cam_periph_getccb(periph, /*priority*/ 1); 2103 2104 scsi_erase(&ccb->csio, 2105 /*retries*/ 1, 2106 /*cbfcnp*/ sadone, 2107 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2108 /*immediate*/ FALSE, 2109 /*long_erase*/ longerase, 2110 /*sense_len*/ SSD_FULL_SIZE, 2111 /*timeout*/ (SA_ERASE_TIMEOUT) * 60 * 1000); 2112 2113 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2114 /*sense_flags*/0, &softc->device_stats); 2115 
2116 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2117 cam_release_devq(ccb->ccb_h.path, 2118 /*relsim_flags*/0, 2119 /*reduction*/0, 2120 /*timeout*/0, 2121 /*getcount_only*/0); 2122 2123 xpt_release_ccb(ccb); 2124 2125 return (error); 2126} 2127 2128#endif /* KERNEL */ 2129 2130/* 2131 * Read tape block limits command. 2132 */ 2133void 2134scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries, 2135 void (*cbfcnp)(struct cam_periph *, union ccb *), 2136 u_int8_t tag_action, 2137 struct scsi_read_block_limits_data *rlimit_buf, 2138 u_int8_t sense_len, u_int32_t timeout) 2139{ 2140 struct scsi_read_block_limits *scsi_cmd; 2141 2142 cam_fill_csio(csio, 2143 retries, 2144 cbfcnp, 2145 /*flags*/CAM_DIR_IN, 2146 tag_action, 2147 /*data_ptr*/(u_int8_t *)rlimit_buf, 2148 /*dxfer_len*/sizeof(*rlimit_buf), 2149 sense_len, 2150 sizeof(*scsi_cmd), 2151 timeout); 2152 2153 scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes; 2154 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2155 scsi_cmd->opcode = READ_BLOCK_LIMITS; 2156} 2157 2158void 2159scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries, 2160 void (*cbfcnp)(struct cam_periph *, union ccb *), 2161 u_int8_t tag_action, int readop, int sli, 2162 int fixed, u_int32_t length, u_int8_t *data_ptr, 2163 u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) 2164{ 2165 struct scsi_sa_rw *scsi_cmd; 2166 2167 scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes; 2168 scsi_cmd->opcode = readop ? SA_READ : SA_WRITE; 2169 scsi_cmd->sli_fixed = 0; 2170 if (sli && readop) 2171 scsi_cmd->sli_fixed |= SAR_SLI; 2172 if (fixed) 2173 scsi_cmd->sli_fixed |= SARW_FIXED; 2174 scsi_ulto3b(length, scsi_cmd->length); 2175 scsi_cmd->control = 0; 2176 2177 cam_fill_csio(csio, 2178 retries, 2179 cbfcnp, 2180 /*flags*/readop ? 
CAM_DIR_IN : CAM_DIR_OUT, 2181 tag_action, 2182 data_ptr, 2183 dxfer_len, 2184 sense_len, 2185 sizeof(*scsi_cmd), 2186 timeout); 2187} 2188 2189void 2190scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries, 2191 void (*cbfcnp)(struct cam_periph *, union ccb *), 2192 u_int8_t tag_action, int immediate, int eot, 2193 int reten, int load, u_int8_t sense_len, 2194 u_int32_t timeout) 2195{ 2196 struct scsi_load_unload *scsi_cmd; 2197 2198 scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes; 2199 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2200 scsi_cmd->opcode = LOAD_UNLOAD; 2201 if (immediate) 2202 scsi_cmd->immediate = SLU_IMMED; 2203 if (eot) 2204 scsi_cmd->eot_reten_load |= SLU_EOT; 2205 if (reten) 2206 scsi_cmd->eot_reten_load |= SLU_RETEN; 2207 if (load) 2208 scsi_cmd->eot_reten_load |= SLU_LOAD; 2209 2210 cam_fill_csio(csio, 2211 retries, 2212 cbfcnp, 2213 /*flags*/CAM_DIR_NONE, 2214 tag_action, 2215 /*data_ptr*/NULL, 2216 /*dxfer_len*/0, 2217 sense_len, 2218 sizeof(*scsi_cmd), 2219 timeout); 2220} 2221 2222void 2223scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries, 2224 void (*cbfcnp)(struct cam_periph *, union ccb *), 2225 u_int8_t tag_action, int immediate, u_int8_t sense_len, 2226 u_int32_t timeout) 2227{ 2228 struct scsi_rewind *scsi_cmd; 2229 2230 scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes; 2231 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2232 scsi_cmd->opcode = REWIND; 2233 if (immediate) 2234 scsi_cmd->immediate = SREW_IMMED; 2235 2236 cam_fill_csio(csio, 2237 retries, 2238 cbfcnp, 2239 /*flags*/CAM_DIR_NONE, 2240 tag_action, 2241 /*data_ptr*/NULL, 2242 /*dxfer_len*/0, 2243 sense_len, 2244 sizeof(*scsi_cmd), 2245 timeout); 2246} 2247 2248void 2249scsi_space(struct ccb_scsiio *csio, u_int32_t retries, 2250 void (*cbfcnp)(struct cam_periph *, union ccb *), 2251 u_int8_t tag_action, scsi_space_code code, 2252 u_int32_t count, u_int8_t sense_len, u_int32_t timeout) 2253{ 2254 struct scsi_space *scsi_cmd; 2255 2256 scsi_cmd = (struct 
scsi_space *)&csio->cdb_io.cdb_bytes; 2257 scsi_cmd->opcode = SPACE; 2258 scsi_cmd->code = code; 2259 scsi_ulto3b(count, scsi_cmd->count); 2260 scsi_cmd->control = 0; 2261 2262 cam_fill_csio(csio, 2263 retries, 2264 cbfcnp, 2265 /*flags*/CAM_DIR_NONE, 2266 tag_action, 2267 /*data_ptr*/NULL, 2268 /*dxfer_len*/0, 2269 sense_len, 2270 sizeof(*scsi_cmd), 2271 timeout); 2272} 2273 2274void 2275scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries, 2276 void (*cbfcnp)(struct cam_periph *, union ccb *), 2277 u_int8_t tag_action, int immediate, int setmark, 2278 u_int32_t num_marks, u_int8_t sense_len, 2279 u_int32_t timeout) 2280{ 2281 struct scsi_write_filemarks *scsi_cmd; 2282 2283 scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes; 2284 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2285 scsi_cmd->opcode = WRITE_FILEMARKS; 2286 if (immediate) 2287 scsi_cmd->byte2 |= SWFMRK_IMMED; 2288 if (setmark) 2289 scsi_cmd->byte2 |= SWFMRK_WSMK; 2290 2291 scsi_ulto3b(num_marks, scsi_cmd->num_marks); 2292 2293 cam_fill_csio(csio, 2294 retries, 2295 cbfcnp, 2296 /*flags*/CAM_DIR_NONE, 2297 tag_action, 2298 /*data_ptr*/NULL, 2299 /*dxfer_len*/0, 2300 sense_len, 2301 sizeof(*scsi_cmd), 2302 timeout); 2303} 2304 2305/* 2306 * The reserve and release unit commands differ only by their opcodes. 
2307 */ 2308void 2309scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries, 2310 void (*cbfcnp)(struct cam_periph *, union ccb *), 2311 u_int8_t tag_action, int third_party, 2312 int third_party_id, u_int8_t sense_len, 2313 u_int32_t timeout, int reserve) 2314{ 2315 struct scsi_reserve_release_unit *scsi_cmd; 2316 2317 scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes; 2318 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2319 2320 if (reserve) 2321 scsi_cmd->opcode = RESERVE_UNIT; 2322 else 2323 scsi_cmd->opcode = RELEASE_UNIT; 2324 2325 if (third_party) { 2326 scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY; 2327 scsi_cmd->lun_thirdparty |= 2328 ((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK); 2329 } 2330 2331 cam_fill_csio(csio, 2332 retries, 2333 cbfcnp, 2334 /*flags*/ CAM_DIR_NONE, 2335 tag_action, 2336 /*data_ptr*/ NULL, 2337 /*dxfer_len*/ 0, 2338 sense_len, 2339 sizeof(*scsi_cmd), 2340 timeout); 2341} 2342 2343void 2344scsi_erase(struct ccb_scsiio *csio, u_int32_t retries, 2345 void (*cbfcnp)(struct cam_periph *, union ccb *), 2346 u_int8_t tag_action, int immediate, int long_erase, 2347 u_int8_t sense_len, u_int32_t timeout) 2348{ 2349 struct scsi_erase *scsi_cmd; 2350 2351 scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes; 2352 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2353 2354 scsi_cmd->opcode = ERASE; 2355 2356 if (immediate) 2357 scsi_cmd->lun_imm_long |= SE_IMMED; 2358 2359 if (long_erase) 2360 scsi_cmd->lun_imm_long |= SE_LONG; 2361 2362 cam_fill_csio(csio, 2363 retries, 2364 cbfcnp, 2365 /*flags*/ CAM_DIR_NONE, 2366 tag_action, 2367 /*data_ptr*/ NULL, 2368 /*dxfer_len*/ 0, 2369 sense_len, 2370 sizeof(*scsi_cmd), 2371 timeout); 2372} 2373