/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major
 * The list is protected by the rwlock
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}

static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
118 "OFFLINE" : tape_state_verbose[tdev->tape_state]); 119} 120 121static 122DEVICE_ATTR(state, 0444, tape_state_show, NULL); 123 124static ssize_t 125tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) 126{ 127 struct tape_device *tdev; 128 ssize_t rc; 129 130 tdev = dev_get_drvdata(dev); 131 if (tdev->first_minor < 0) 132 return scnprintf(buf, PAGE_SIZE, "N/A\n"); 133 134 spin_lock_irq(get_ccwdev_lock(tdev->cdev)); 135 if (list_empty(&tdev->req_queue)) 136 rc = scnprintf(buf, PAGE_SIZE, "---\n"); 137 else { 138 struct tape_request *req; 139 140 req = list_entry(tdev->req_queue.next, struct tape_request, 141 list); 142 rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); 143 } 144 spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); 145 return rc; 146} 147 148static 149DEVICE_ATTR(operation, 0444, tape_operation_show, NULL); 150 151static ssize_t 152tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) 153{ 154 struct tape_device *tdev; 155 156 tdev = dev_get_drvdata(dev); 157 158 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); 159} 160 161static 162DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL); 163 164static struct attribute *tape_attrs[] = { 165 &dev_attr_medium_state.attr, 166 &dev_attr_first_minor.attr, 167 &dev_attr_state.attr, 168 &dev_attr_operation.attr, 169 &dev_attr_blocksize.attr, 170 NULL 171}; 172 173static struct attribute_group tape_attr_group = { 174 .attrs = tape_attrs, 175}; 176 177/* 178 * Tape state functions 179 */ 180void 181tape_state_set(struct tape_device *device, enum tape_state newstate) 182{ 183 const char *str; 184 185 if (device->tape_state == TS_NOT_OPER) { 186 DBF_EVENT(3, "ts_set err: not oper\n"); 187 return; 188 } 189 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); 190 DBF_EVENT(4, "old ts:\t\n"); 191 if (device->tape_state < TS_SIZE && device->tape_state >=0 ) 192 str = tape_state_verbose[device->tape_state]; 193 else 194 str = "UNKNOWN TS"; 195 DBF_EVENT(4, "%s\n", str); 196 DBF_EVENT(4, "new ts:\t\n"); 197 if (newstate < TS_SIZE && newstate >= 0) 198 str = tape_state_verbose[newstate]; 199 else 200 str = "UNKNOWN TS"; 201 DBF_EVENT(4, "%s\n", str); 202 device->tape_state = newstate; 203 wake_up(&device->state_change_wq); 204} 205 206void 207tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) 208{ 209 if (device->medium_state == newstate) 210 return; 211 switch(newstate){ 212 case MS_UNLOADED: 213 device->tape_generic_status |= GMT_DR_OPEN(~0); 214 if (device->medium_state == MS_LOADED) 215 pr_info("%s: The tape cartridge has been successfully " 216 "unloaded\n", dev_name(&device->cdev->dev)); 217 break; 218 case MS_LOADED: 219 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 220 if (device->medium_state == MS_UNLOADED) 221 pr_info("%s: A tape cartridge has been mounted\n", 222 dev_name(&device->cdev->dev)); 223 break; 224 default: 225 // print nothing 226 break; 227 } 228 device->medium_state = newstate; 229 wake_up(&device->state_change_wq); 230} 231 232/* 233 * Stop running ccw. Has to be called with the device lock held. 
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
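
/*
 * Illustrative sketch (not part of the original driver): a caller that
 * looks up a device by index holds a reference on success and is expected
 * to drop it again with tape_put_device(), roughly like this:
 *
 *	device = tape_find_device(devindex);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	... use the device ...
 *	tape_put_device(device);
 */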

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects that the
 * device is gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fall through */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request (struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	device->lb_timeout.data = 0UL;
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fall through */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
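
/*
 * Illustrative sketch (not part of the original driver): a discipline
 * typically runs a synchronous command through the interface above roughly
 * like this; the channel-program setup is omitted because it is discipline
 * specific.
 *
 *	request = tape_alloc_request(cplength, datasize);
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	... build the ccw chain in request->cpaddr ...
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 *	return rc;
 */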

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%s): Request timed out\n",
				dev_name(&cdev->dev));
			/* fall through */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%s): Unexpected i/o error %li\n",
				dev_name(&cdev->dev),
				PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		device->lb_timeout.data =
			(unsigned long) tape_get_device(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
	    mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;

}
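
/*
 * Illustrative note (not part of the original driver): for the space and
 * skip operations listed above, tape_mtop() splits large counts into
 * chunks of at most 500. An MTFSR with mt_count == 1200, for example, is
 * executed as fn(device, 500), fn(device, 500), fn(device, 200), stopping
 * early if any chunk fails.
 */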

/*
 * Tape init function.
 */
static int
tape_init (void)
{
	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init ();
	tapeblock_init ();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);