1/* 2 * File...........: linux/drivers/s390/block/dasd.c 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 9 * 10 */ 11 12#include <linux/kmod.h> 13#include <linux/init.h> 14#include <linux/interrupt.h> 15#include <linux/ctype.h> 16#include <linux/major.h> 17#include <linux/slab.h> 18#include <linux/buffer_head.h> 19#include <linux/hdreg.h> 20 21#include <asm/ccwdev.h> 22#include <asm/ebcdic.h> 23#include <asm/idals.h> 24#include <asm/todclk.h> 25 26/* This is ugly... */ 27#define PRINTK_HEADER "dasd:" 28 29#include "dasd_int.h" 30/* 31 * SECTION: Constant definitions to be used within this file 32 */ 33#define DASD_CHANQ_MAX_SIZE 4 34 35/* 36 * SECTION: exported variables of dasd.c 37 */ 38debug_info_t *dasd_debug_area; 39struct dasd_discipline *dasd_diag_discipline_pointer; 40void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 41 42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 44 " Copyright 2000 IBM Corporation"); 45MODULE_SUPPORTED_DEVICE("dasd"); 46MODULE_LICENSE("GPL"); 47 48/* 49 * SECTION: prototypes for static functions of dasd.c 50 */ 51static int dasd_alloc_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device); 54static void dasd_flush_request_queue(struct dasd_device *); 55static int dasd_flush_ccw_queue(struct dasd_device *, int); 56static void dasd_tasklet(struct dasd_device *); 57static void do_kick_device(struct work_struct *); 58 59/* 60 * SECTION: Operations on the device structure. 
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;

/*
 * Allocate memory for a new device structure.
 * Returns the new struct dasd_device or an ERR_PTR(-ENOMEM) on
 * allocation failure.  GFP_ATOMIC is used throughout -- presumably
 * callers may run in non-sleeping context; TODO confirm.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		/* Undo the allocations done so far. */
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	/* Carve both areas into chunk lists for later cqr allocation. */
	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set (&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 * Counterpart of dasd_alloc_device; also releases the discipline
 * private area, if any was allocated.
 */
void
dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Make a new device known to the system.
124 */ 125static int 126dasd_state_new_to_known(struct dasd_device *device) 127{ 128 int rc; 129 130 /* 131 * As long as the device is not in state DASD_STATE_NEW we want to 132 * keep the reference count > 0. 133 */ 134 dasd_get_device(device); 135 136 rc = dasd_alloc_queue(device); 137 if (rc) { 138 dasd_put_device(device); 139 return rc; 140 } 141 142 device->state = DASD_STATE_KNOWN; 143 return 0; 144} 145 146/* 147 * Let the system forget about a device. 148 */ 149static int 150dasd_state_known_to_new(struct dasd_device * device) 151{ 152 /* Disable extended error reporting for this device. */ 153 dasd_eer_disable(device); 154 /* Forget the discipline information. */ 155 if (device->discipline) 156 module_put(device->discipline->owner); 157 device->discipline = NULL; 158 if (device->base_discipline) 159 module_put(device->base_discipline->owner); 160 device->base_discipline = NULL; 161 device->state = DASD_STATE_NEW; 162 163 dasd_free_queue(device); 164 165 /* Give up reference we took in dasd_state_new_to_known. */ 166 dasd_put_device(device); 167 return 0; 168} 169 170/* 171 * Request the irq line for the device. 172 */ 173static int 174dasd_state_known_to_basic(struct dasd_device * device) 175{ 176 int rc; 177 178 /* Allocate and register gendisk structure. */ 179 rc = dasd_gendisk_alloc(device); 180 if (rc) 181 return rc; 182 183 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 185 8 * sizeof (long)); 186 debug_register_view(device->debug_area, &debug_sprintf_view); 187 debug_set_level(device->debug_area, DBF_WARNING); 188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 189 190 device->state = DASD_STATE_BASIC; 191 return 0; 192} 193 194/* 195 * Release the irq line for the device. Terminate any running i/o. 
 */
static int
dasd_state_basic_to_known(struct dasd_device * device)
{
	int rc;

	dasd_gendisk_free(device);
	/* NOTE(review): the gendisk is released before the ccw queue is
	 * flushed; if the flush fails we return with the device still in
	 * DASD_STATE_BASIC but without a gendisk -- confirm callers retry
	 * the transition until the flush succeeds. */
	rc = dasd_flush_ccw_queue(device, 1);
	if (rc)
		return rc;
	dasd_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd disciplines
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	/* The analysis hook is optional for a discipline. */
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc) {
		/* -EAGAIN keeps DASD_STATE_BASIC so the analysis can be
		 * retried later; any other error marks the volume as
		 * unformatted. */
		if (rc != -EAGAIN)
			device->state = DASD_STATE_UNFMT;
		return rc;
	}
	/* make disk known with correct capacity */
	dasd_setup_queue(device);
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	device->state = DASD_STATE_READY;
	/* A failed partition scan drops the device back to BASIC. */
	rc = dasd_scan_partitions(device);
	if (rc)
		device->state = DASD_STATE_BASIC;
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information.
Check if the target level is basic 257 * and if it is create fake disk for formatting. 258 */ 259static int 260dasd_state_ready_to_basic(struct dasd_device * device) 261{ 262 int rc; 263 264 rc = dasd_flush_ccw_queue(device, 0); 265 if (rc) 266 return rc; 267 dasd_destroy_partitions(device); 268 dasd_flush_request_queue(device); 269 device->blocks = 0; 270 device->bp_block = 0; 271 device->s2b_shift = 0; 272 device->state = DASD_STATE_BASIC; 273 return 0; 274} 275 276/* 277 * Back to basic. 278 */ 279static int 280dasd_state_unfmt_to_basic(struct dasd_device * device) 281{ 282 device->state = DASD_STATE_BASIC; 283 return 0; 284} 285 286/* 287 * Make the device online and schedule the bottom half to start 288 * the requeueing of requests from the linux request queue to the 289 * ccw queue. 290 */ 291static int 292dasd_state_ready_to_online(struct dasd_device * device) 293{ 294 device->state = DASD_STATE_ONLINE; 295 dasd_schedule_bh(device); 296 return 0; 297} 298 299/* 300 * Stop the requeueing of requests again. 301 */ 302static int 303dasd_state_online_to_ready(struct dasd_device * device) 304{ 305 device->state = DASD_STATE_READY; 306 return 0; 307} 308 309/* 310 * Device startup state changes. 
311 */ 312static int 313dasd_increase_state(struct dasd_device *device) 314{ 315 int rc; 316 317 rc = 0; 318 if (device->state == DASD_STATE_NEW && 319 device->target >= DASD_STATE_KNOWN) 320 rc = dasd_state_new_to_known(device); 321 322 if (!rc && 323 device->state == DASD_STATE_KNOWN && 324 device->target >= DASD_STATE_BASIC) 325 rc = dasd_state_known_to_basic(device); 326 327 if (!rc && 328 device->state == DASD_STATE_BASIC && 329 device->target >= DASD_STATE_READY) 330 rc = dasd_state_basic_to_ready(device); 331 332 if (!rc && 333 device->state == DASD_STATE_UNFMT && 334 device->target > DASD_STATE_UNFMT) 335 rc = -EPERM; 336 337 if (!rc && 338 device->state == DASD_STATE_READY && 339 device->target >= DASD_STATE_ONLINE) 340 rc = dasd_state_ready_to_online(device); 341 342 return rc; 343} 344 345/* 346 * Device shutdown state changes. 347 */ 348static int 349dasd_decrease_state(struct dasd_device *device) 350{ 351 int rc; 352 353 rc = 0; 354 if (device->state == DASD_STATE_ONLINE && 355 device->target <= DASD_STATE_READY) 356 rc = dasd_state_online_to_ready(device); 357 358 if (!rc && 359 device->state == DASD_STATE_READY && 360 device->target <= DASD_STATE_BASIC) 361 rc = dasd_state_ready_to_basic(device); 362 363 if (!rc && 364 device->state == DASD_STATE_UNFMT && 365 device->target <= DASD_STATE_BASIC) 366 rc = dasd_state_unfmt_to_basic(device); 367 368 if (!rc && 369 device->state == DASD_STATE_BASIC && 370 device->target <= DASD_STATE_KNOWN) 371 rc = dasd_state_basic_to_known(device); 372 373 if (!rc && 374 device->state == DASD_STATE_KNOWN && 375 device->target <= DASD_STATE_NEW) 376 rc = dasd_state_known_to_new(device); 377 378 return rc; 379} 380 381/* 382 * This is the main startup/shutdown routine. 383 */ 384static void 385dasd_change_state(struct dasd_device *device) 386{ 387 int rc; 388 389 if (device->state == device->target) 390 /* Already where we want to go today... 
*/ 391 return; 392 if (device->state < device->target) 393 rc = dasd_increase_state(device); 394 else 395 rc = dasd_decrease_state(device); 396 if (rc && rc != -EAGAIN) 397 device->target = device->state; 398 399 if (device->state == device->target) 400 wake_up(&dasd_init_waitq); 401 402 /* let user-space know that the device status changed */ 403 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 404} 405 406/* 407 * Kick starter for devices that did not complete the startup/shutdown 408 * procedure or were sleeping because of a pending state. 409 * dasd_kick_device will schedule a call do do_kick_device to the kernel 410 * event daemon. 411 */ 412static void 413do_kick_device(struct work_struct *work) 414{ 415 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 416 dasd_change_state(device); 417 dasd_schedule_bh(device); 418 dasd_put_device(device); 419} 420 421void 422dasd_kick_device(struct dasd_device *device) 423{ 424 dasd_get_device(device); 425 /* queue call to dasd_kick_device to the kernel event daemon. */ 426 schedule_work(&device->kick_work); 427} 428 429/* 430 * Set the target state for a device and starts the state change. 431 */ 432void 433dasd_set_target_state(struct dasd_device *device, int target) 434{ 435 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 436 if (dasd_probeonly && target > DASD_STATE_READY) 437 target = DASD_STATE_READY; 438 if (device->target != target) { 439 if (device->state == target) 440 wake_up(&dasd_init_waitq); 441 device->target = target; 442 } 443 if (device->state != device->target) 444 dasd_change_state(device); 445} 446 447/* 448 * Enable devices with device numbers in [from..to]. 
449 */ 450static inline int 451_wait_for_device(struct dasd_device *device) 452{ 453 return (device->state == device->target); 454} 455 456void 457dasd_enable_device(struct dasd_device *device) 458{ 459 dasd_set_target_state(device, DASD_STATE_ONLINE); 460 if (device->state <= DASD_STATE_KNOWN) 461 /* No discipline for device found. */ 462 dasd_set_target_state(device, DASD_STATE_NEW); 463 /* Now wait for the devices to come up. */ 464 wait_event(dasd_init_waitq, _wait_for_device(device)); 465} 466 467/* 468 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 469 */ 470#ifdef CONFIG_DASD_PROFILE 471 472struct dasd_profile_info_t dasd_global_profile; 473unsigned int dasd_profile_level = DASD_PROFILE_OFF; 474 475/* 476 * Increments counter in global and local profiling structures. 477 */ 478#define dasd_profile_counter(value, counter, device) \ 479{ \ 480 int index; \ 481 for (index = 0; index < 31 && value >> (2+index); index++); \ 482 dasd_global_profile.counter[index]++; \ 483 device->profile.counter[index]++; \ 484} 485 486/* 487 * Add profiling information for cqr before execution. 488 */ 489static void 490dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 491 struct request *req) 492{ 493 struct list_head *l; 494 unsigned int counter; 495 496 if (dasd_profile_level != DASD_PROFILE_ON) 497 return; 498 499 /* count the length of the chanq for statistics */ 500 counter = 0; 501 list_for_each(l, &device->ccw_queue) 502 if (++counter >= 31) 503 break; 504 dasd_global_profile.dasd_io_nr_req[counter]++; 505 device->profile.dasd_io_nr_req[counter]++; 506} 507 508/* 509 * Add profiling information for cqr after execution. 
 */
static void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	/* Skip requests that miss a timestamp or moved no sectors; they
	 * would only distort the statistics (and sectors == 0 would
	 * divide by zero below). */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* TOD clock deltas, shifted down to microseconds. */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* A zeroed request counter marks a freshly reset profile area. */
	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
/* Profiling disabled: compile the hooks away. */
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space.
There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	/* Channel program area (GFP_DMA: the channel subsystem needs
	 * addressable storage -- presumably below 2G; TODO confirm). */
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* Additional per-request data area. */
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* Store the discipline magic in EBCDIC for later verification. */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	/* The request keeps a reference on the device until freed. */
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	/* Round the request structure up to an 8 byte boundary so the
	 * channel program that follows it is properly aligned. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	/* Carve the request out of the per-device static chunk pool. */
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	/* Channel program and data area are laid out behind the struct. */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	/* Store the discipline magic in EBCDIC for later verification. */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	/* The request keeps a reference on the device until freed. */
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	/* Drop the reference taken at allocation time. */
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	/* Return the request memory to the per-device chunk pool. */
	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	/* Drop the reference taken at allocation time. */
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	/* The magic was stored in EBCDIC at allocation time; it must
	 * match the discipline that owns the device. */
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	/* Give the clear up to 5 tries while the request is in i/o. */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* Any other return code is a driver bug. */
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	/* Let the bottom half pick up the state change. */
	dasd_schedule_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		/* The retry budget of this request is exhausted. */
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		/* Any other return code is a driver bug. */
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices.
This is used for different purposes 830 * 1) missing interrupt handler for normal operation 831 * 2) delayed start of request where start_IO failed with -EBUSY 832 * 3) timeout for missing state change interrupts 833 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 834 * DASD_CQR_QUEUED for 2) and 3). 835 */ 836static void 837dasd_timeout_device(unsigned long ptr) 838{ 839 unsigned long flags; 840 struct dasd_device *device; 841 842 device = (struct dasd_device *) ptr; 843 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 844 /* re-activate request queue */ 845 device->stopped &= ~DASD_STOPPED_PENDING; 846 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 847 dasd_schedule_bh(device); 848} 849 850/* 851 * Setup timeout for a device in jiffies. 852 */ 853void 854dasd_set_timer(struct dasd_device *device, int expires) 855{ 856 if (expires == 0) { 857 if (timer_pending(&device->timer)) 858 del_timer(&device->timer); 859 return; 860 } 861 if (timer_pending(&device->timer)) { 862 if (mod_timer(&device->timer, jiffies + expires)) 863 return; 864 } 865 device->timer.function = dasd_timeout_device; 866 device->timer.data = (unsigned long) device; 867 device->timer.expires = jiffies + expires; 868 add_timer(&device->timer); 869} 870 871/* 872 * Clear timeout for a device. 
873 */ 874void 875dasd_clear_timer(struct dasd_device *device) 876{ 877 if (timer_pending(&device->timer)) 878 del_timer(&device->timer); 879} 880 881static void 882dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) 883{ 884 struct dasd_ccw_req *cqr; 885 struct dasd_device *device; 886 887 cqr = (struct dasd_ccw_req *) intparm; 888 if (cqr->status != DASD_CQR_IN_IO) { 889 MESSAGE(KERN_DEBUG, 890 "invalid status in handle_killed_request: " 891 "bus_id %s, status %02x", 892 cdev->dev.bus_id, cqr->status); 893 return; 894 } 895 896 device = (struct dasd_device *) cqr->device; 897 if (device == NULL || 898 device != dasd_device_from_cdev_locked(cdev) || 899 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 900 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 901 cdev->dev.bus_id); 902 return; 903 } 904 905 /* Schedule request to be retried. */ 906 cqr->status = DASD_CQR_QUEUED; 907 908 dasd_clear_timer(device); 909 dasd_schedule_bh(device); 910 dasd_put_device(device); 911} 912 913static void 914dasd_handle_state_change_pending(struct dasd_device *device) 915{ 916 struct dasd_ccw_req *cqr; 917 struct list_head *l, *n; 918 919 /* First of all start sense subsystem status request. */ 920 dasd_eer_snss(device); 921 922 device->stopped &= ~DASD_STOPPED_PENDING; 923 924 /* restart all 'running' IO on queue */ 925 list_for_each_safe(l, n, &device->ccw_queue) { 926 cqr = list_entry(l, struct dasd_ccw_req, list); 927 if (cqr->status == DASD_CQR_IN_IO) { 928 cqr->status = DASD_CQR_QUEUED; 929 } 930 } 931 dasd_clear_timer(device); 932 dasd_schedule_bh(device); 933} 934 935/* 936 * Interrupt handler for "normal" ssch-io based dasd devices. 
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	/* The common i/o layer may hand us an error pointer instead of
	 * a real irb. */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			/* Drop the reference taken by the lookup. */
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	/* Verify the request really belongs to this device/discipline. */
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		/* Waiters in the flush path are blocked on this event. */
		wake_up(&dasd_flush_wq);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (irb->esw.esw0.erw.cons)
		/* Concurrent sense data available: let the discipline
		 * classify the error. */
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		/* Keep a copy of the irb for the error recovery code. */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			/* dump sense data */
			dasd_log_sense(cqr, irb);
		}
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * posts the buffer_cache about a
 * finalized request
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	/* Complete all sectors of the request in one go; a partial
	 * completion here would indicate an internal inconsistency. */
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	/* The discipline selects the postaction to undo/finish the erp. */
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 *
 * Walks device->ccw_queue from the head and moves every request that
 * already has a final status (DONE/FAILED) to final_queue; requests in
 * state ERROR are handed to error recovery first. Processing stops at
 * the first request that is not final yet.
 * NOTE(review): caller appears to hold the ccwdev lock (see dasd_tasklet)
 * — confirm before calling from elsewhere.
 */
static void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* I/O was halted - no recovery possible. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				/* Run discipline erp only if sense data is
				 * available and the request opted in to erp;
				 * otherwise use the default (retry) action. */
				if (cqr->irb.esw.esw0.erw.cons &&
				    test_bit(DASD_CQR_FLAGS_USE_ERP,
					     &cqr->flags)) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			/* erp may have modified the queue - rescan it. */
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

			/* restart request: requeue it and quiesce the
			 * device until the eer situation is handled. */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}

/*
 * Callback for requests built from block layer requests: free the
 * channel program and complete the struct request towards the block
 * layer under the request queue lock.
 */
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	/* free_cp returns the completion status for the block request. */
	status = cqr->device->discipline->free_cp(cqr,req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}


/*
 * Fetch requests from the block device queue.
 */
static void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Count queued requests already on the ccw queue so the channel
	 * queue is limited to DASD_CHANQ_MAX_SIZE entries. */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
		nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		/* Fail writes early on read-only devices. */
		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Device is in a permanent error condition - fail I/O. */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&device->ccw_queue))
					break;
				device->stopped |= DASD_STOPPED_PENDING;
				dasd_set_timer(device, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Hand the request over to the ccw queue; completion goes
		 * through dasd_end_request_cb. */
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time.
 * If so, terminate the IO.
 */
static void
__dasd_check_expire(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	/* Only a started request with a non-zero expiry can time out. */
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dasd_set_timer(device, 5*HZ);
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void
__dasd_start_head(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* Non-temporary stop condition will trigger fail fast */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!dasd_eer_enabled(device))) {
		cqr->status = DASD_CQR_FAILED;
		/* let the bottom half run the failure callback */
		dasd_schedule_bh(device);
		return;
	}
	/* Don't try to start requests if device is stopped */
	if (device->stopped)
		return;

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		/* arm the expiry timer for the started request */
		dasd_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_set_timer(device, 50);
}

/*
 * Wait condition: the cleared request has been put back to QUEUED
 * by the interrupt handler.
 */
static inline int
_wait_for_clear(struct dasd_ccw_req *cqr)
{
	return (cqr->status == DASD_CQR_QUEUED);
}

/*
 * Remove all requests from the ccw queue (all = '1') or only block device
 * requests in case all = '0'.
 * Take care of the erp-chain (chained via cqr->refers) and remove either
 * the whole erp-chain or none of the erp-requests.
 * If a request is currently running, term_IO is called and the request
 * is re-queued. Prior to removing the terminated request we need to wait
 * for the clear-interrupt.
 * In case termination is not possible we stop processing and just finish
 * the already moved requests.
 */
static int
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
	struct dasd_ccw_req *cqr, *orig, *n;
	int rc, i;

	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
restart:
	list_for_each_safe(cqr, n, &device->ccw_queue, list) {
		/* get original request of erp request-chain */
		for (orig = cqr; orig->refers != NULL; orig = orig->refers);

		/* Flush all request or only block device requests? */
		if (all == 0 && cqr->callback != dasd_end_request_cb &&
		    orig->callback != dasd_end_request_cb) {
			continue;
		}
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
		case DASD_CQR_ERROR:
			/* set request to FAILED */
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_FAILED;
			break;
		default: /* do not touch the others */
			break;
		}
		/* Rechain request (including erp chain) */
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
			cqr->endclk = get_clock();
			list_move_tail(&cqr->list, &flush_queue);
		}
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}

finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
		if (cqr->status == DASD_CQR_CLEAR) {
			/* wait for clear interrupt! */
			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
			cqr->status = DASD_CQR_FAILED;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 *
 * Runs as the device tasklet: finish final requests, run their
 * callbacks without any lock held, then refill the ccw queue from
 * the block device queue and kick the queue head.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow the tasklet to be scheduled again from here on. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	/* Lock order: request_queue_lock first, then the ccwdev lock. */
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	/* Drop the reference taken by dasd_schedule_bh. */
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	/* Hold a device reference until dasd_tasklet has run. */
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	/* data is the wait queue head set up by the sleep_on functions. */
	wake_up((wait_queue_head_t *) data);
}

/*
 * Wait condition for the sleep_on functions: the request is final
 * (DONE or FAILED) and has been removed from the ccw queue.
 * Checked under the ccwdev lock to get a consistent snapshot.
 */
static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_FAILED) &&
	      list_empty(&cqr->list));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Attempts to start a special ccw queue and waits for its completion.
 * Returns 0 on success, -EIO if the request failed.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		/* A signal interrupted the wait - clean up depending on
		 * how far the request has progressed. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/* wait (non-interruptible) for final status
				 * because signal is still pending */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was not started yet - remove it from
			 * the queue and give up */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptable' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	return device->discipline->term_IO(cqr);
}

/*
 * Terminate the currently running request (if any), queue cqr at the
 * head of the ccw queue and wait for its completion.
 * Returns 0 on success, -EIO if the request failed, or the term_IO
 * error code if the running request could not be terminated.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* add to the head so it runs before the requeued request */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* let the bottom half deliver the final status to the callback */
	dasd_schedule_bh(device);
	return rc;
}

/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 * The block layer already holds request_queue_lock here; only the
 * ccwdev lock is taken in addition.
 */
static void
do_dasd_request(request_queue_t * queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
1757 */ 1758static int 1759dasd_alloc_queue(struct dasd_device * device) 1760{ 1761 int rc; 1762 1763 device->request_queue = blk_init_queue(do_dasd_request, 1764 &device->request_queue_lock); 1765 if (device->request_queue == NULL) 1766 return -ENOMEM; 1767 1768 device->request_queue->queuedata = device; 1769 1770 elevator_exit(device->request_queue->elevator); 1771 rc = elevator_init(device->request_queue, "deadline"); 1772 if (rc) { 1773 blk_cleanup_queue(device->request_queue); 1774 return rc; 1775 } 1776 return 0; 1777} 1778 1779/* 1780 * Allocate and initialize request queue. 1781 */ 1782static void 1783dasd_setup_queue(struct dasd_device * device) 1784{ 1785 int max; 1786 1787 blk_queue_hardsect_size(device->request_queue, device->bp_block); 1788 max = device->discipline->max_blocks << device->s2b_shift; 1789 blk_queue_max_sectors(device->request_queue, max); 1790 blk_queue_max_phys_segments(device->request_queue, -1L); 1791 blk_queue_max_hw_segments(device->request_queue, -1L); 1792 blk_queue_max_segment_size(device->request_queue, -1L); 1793 blk_queue_segment_boundary(device->request_queue, -1L); 1794 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); 1795} 1796 1797/* 1798 * Deactivate and free request queue. 1799 */ 1800static void 1801dasd_free_queue(struct dasd_device * device) 1802{ 1803 if (device->request_queue) { 1804 blk_cleanup_queue(device->request_queue); 1805 device->request_queue = NULL; 1806 } 1807} 1808 1809/* 1810 * Flush request on the request queue. 
 */
static void
dasd_flush_request_queue(struct dasd_device * device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	/* Dequeue and fail every pending block request. */
	spin_lock_irq(&device->request_queue_lock);
	while ((req = elv_next_request(device->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, 0);
	}
	spin_unlock_irq(&device->request_queue_lock);
}

/*
 * Block device open. Takes an open_count reference and a module
 * reference on the discipline; both are dropped again on any error.
 */
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}

/*
 * Block device release. Drops the references taken in dasd_open.
 */
static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (!device)
		return -ENODEV;

	/* Geometry is discipline specific; without a fill_geometry
	 * implementation there is nothing to report. */
	if (!device->discipline ||
	    !device->discipline->fill_geometry)
		return -EINVAL;

	device->discipline->fill_geometry(device, geo);
	geo->start = get_start_sect(bdev) >> device->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};


/*
 * Module/driver teardown. Also used as error cleanup by dasd_init,
 * hence every step checks whether its object actually exists.
 */
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int
dasd_generic_probe (struct ccw_device *cdev,
		    struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
		ret = ccw_device_set_online(cdev);
	/* ret is still 0 here unless set_online was attempted and failed */
	if (ret)
		printk(KERN_WARNING
		       "dasd_generic_probe: could not initially online "
		       "ccw-device %s\n", cdev->dev.bus_id);
	return ret;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int
dasd_generic_set_online (struct ccw_device *cdev,
			 struct dasd_discipline *base_discipline)

{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		/* DIAG access requested - switch to the DIAG discipline
		 * if its module has registered itself. */
		if (!dasd_diag_discipline_pointer) {
			printk (KERN_WARNING
				"dasd_generic couldn't online device %s "
				"- discipline DIAG not available\n",
				cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	/* Pin both modules; they may differ when DIAG is in use. */
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk (KERN_WARNING
			"dasd_generic couldn't online device %s "
			"with discipline %s rc=%i\n",
			cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk (KERN_WARNING
			"dasd_generic discipline not found for %s\n",
			cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
				cdev->dev.bus_id);

	/* wait until the device reaches its target state (or fails)
	 * — _wait_for_device is defined elsewhere in this file */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}

/*
 * Deactivate a device. Refused while the device is still opened by
 * anyone other than the internal blkdev_get reference.
 */
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	open_count = (int) atomic_read(&device->open_count);
	if (open_count > max_count) {
		if (open_count > 0)
			printk (KERN_WARNING "Can't offline dasd device with "
				"open count = %i.\n",
				open_count);
		else
			printk (KERN_WARNING "%s",
				"Can't offline dasd device due to internal "
				"use\n");
		/* undo the offline flag so the device stays usable */
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}

/*
 * Path event notification from the common I/O layer.
 * Returns 1 if the event was handled for an active device, 0 otherwise.
 */
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			/* fail running requests and stop with -EIO */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			/* requeue running requests and wait for the
			 * paths to come back */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}

/*
 * Build a single-CCW Read Device Characteristics (RDC) request that
 * reads rdc_buffer_size bytes into rdc_buffer.
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->device = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Issue a synchronous Read Device Characteristics request and wait
 * for its completion. Returns 0 on success or a negative error code.
 */
int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void **rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->device);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * Module initialization: set up wait queues, the debug area and all
 * driver subsystems. On any failure dasd_exit undoes the partial setup.
 */
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
2271EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2272 2273EXPORT_SYMBOL(dasd_add_request_head); 2274EXPORT_SYMBOL(dasd_add_request_tail); 2275EXPORT_SYMBOL(dasd_cancel_req); 2276EXPORT_SYMBOL(dasd_clear_timer); 2277EXPORT_SYMBOL(dasd_enable_device); 2278EXPORT_SYMBOL(dasd_int_handler); 2279EXPORT_SYMBOL(dasd_kfree_request); 2280EXPORT_SYMBOL(dasd_kick_device); 2281EXPORT_SYMBOL(dasd_kmalloc_request); 2282EXPORT_SYMBOL(dasd_schedule_bh); 2283EXPORT_SYMBOL(dasd_set_target_state); 2284EXPORT_SYMBOL(dasd_set_timer); 2285EXPORT_SYMBOL(dasd_sfree_request); 2286EXPORT_SYMBOL(dasd_sleep_on); 2287EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2288EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2289EXPORT_SYMBOL(dasd_smalloc_request); 2290EXPORT_SYMBOL(dasd_start_IO); 2291EXPORT_SYMBOL(dasd_term_IO); 2292 2293EXPORT_SYMBOL_GPL(dasd_generic_probe); 2294EXPORT_SYMBOL_GPL(dasd_generic_remove); 2295EXPORT_SYMBOL_GPL(dasd_generic_notify); 2296EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2297EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2298