1/* 2 * drivers/s390/cio/cio.c 3 * S/390 common I/O routines -- low level i/o calls 4 * 5 * Copyright IBM Corp. 1999,2008 6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com) 9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 10 */ 11 12#define KMSG_COMPONENT "cio" 13#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 15#include <linux/ftrace.h> 16#include <linux/module.h> 17#include <linux/init.h> 18#include <linux/slab.h> 19#include <linux/device.h> 20#include <linux/kernel_stat.h> 21#include <linux/interrupt.h> 22#include <asm/cio.h> 23#include <asm/delay.h> 24#include <asm/irq.h> 25#include <asm/irq_regs.h> 26#include <asm/setup.h> 27#include <asm/reset.h> 28#include <asm/ipl.h> 29#include <asm/chpid.h> 30#include <asm/airq.h> 31#include <asm/isc.h> 32#include <asm/cputime.h> 33#include <asm/fcx.h> 34#include <asm/nmi.h> 35#include <asm/crw.h> 36#include "cio.h" 37#include "css.h" 38#include "chsc.h" 39#include "ioasm.h" 40#include "io_sch.h" 41#include "blacklist.h" 42#include "cio_debug.h" 43#include "chp.h" 44 45debug_info_t *cio_debug_msg_id; 46debug_info_t *cio_debug_trace_id; 47debug_info_t *cio_debug_crw_id; 48 49/* 50 * Function: cio_debug_init 51 * Initializes three debug logs for common I/O: 52 * - cio_msg logs generic cio messages 53 * - cio_trace logs the calling of different functions 54 * - cio_crw logs machine check related cio messages 55 */ 56static int __init cio_debug_init(void) 57{ 58 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long)); 59 if (!cio_debug_msg_id) 60 goto out_unregister; 61 debug_register_view(cio_debug_msg_id, &debug_sprintf_view); 62 debug_set_level(cio_debug_msg_id, 2); 63 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16); 64 if (!cio_debug_trace_id) 65 goto out_unregister; 66 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view); 67 debug_set_level(cio_debug_trace_id, 2); 68 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long)); 69 if (!cio_debug_crw_id) 70 goto out_unregister; 71 debug_register_view(cio_debug_crw_id, &debug_sprintf_view); 72 debug_set_level(cio_debug_crw_id, 4); 73 return 0; 74 75out_unregister: 76 if (cio_debug_msg_id) 77 debug_unregister(cio_debug_msg_id); 78 if (cio_debug_trace_id) 79 debug_unregister(cio_debug_trace_id); 80 if (cio_debug_crw_id) 81 debug_unregister(cio_debug_crw_id); 82 return -1; 83} 84 85arch_initcall (cio_debug_init); 86 87int 88cio_set_options (struct subchannel *sch, int flags) 89{ 90 sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; 91 sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; 92 sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; 93 return 0; 94} 95 96int 97cio_get_options (struct subchannel *sch) 98{ 99 int flags; 100 101 flags = 0; 102 if (sch->options.suspend) 103 flags |= DOIO_ALLOW_SUSPEND; 104 if (sch->options.prefetch) 105 flags |= DOIO_DENY_PREFETCH; 106 if (sch->options.inter) 107 flags |= DOIO_SUPPRESS_INTER; 108 return flags; 109} 110 111static int 112cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 113{ 114 char dbf_text[15]; 115 116 if (lpm != 0) 117 sch->lpm &= ~lpm; 118 else 119 sch->lpm = 0; 120 121 CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " 122 "subchannel 0.%x.%04x!\n", sch->schid.ssid, 123 sch->schid.sch_no); 124 125 if (cio_update_schib(sch)) 126 return -ENODEV; 127 128 sprintf(dbf_text, "no%s", dev_name(&sch->dev)); 129 CIO_TRACE_EVENT(0, dbf_text); 130 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 131 132 return (sch->lpm ? -EACCES : -ENODEV); 133} 134 135int 136cio_start_key (struct subchannel *sch, /* subchannel structure */ 137 struct ccw1 * cpa, /* logical channel prog addr */ 138 __u8 lpm, /* logical path mask */ 139 __u8 key) /* storage key */ 140{ 141 int ccode; 142 union orb *orb; 143 144 CIO_TRACE_EVENT(5, "stIO"); 145 CIO_TRACE_EVENT(5, dev_name(&sch->dev)); 146 147 orb = &to_io_private(sch)->orb; 148 memset(orb, 0, sizeof(union orb)); 149 /* sch is always under 2G. */ 150 orb->cmd.intparm = (u32)(addr_t)sch; 151 orb->cmd.fmt = 1; 152 153 orb->cmd.pfch = sch->options.prefetch == 0; 154 orb->cmd.spnd = sch->options.suspend; 155 orb->cmd.ssic = sch->options.suspend && sch->options.inter; 156 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; 157#ifdef CONFIG_64BIT 158 /* 159 * for 64 bit we always support 64 bit IDAWs with 4k page size only 160 */ 161 orb->cmd.c64 = 1; 162 orb->cmd.i2k = 0; 163#endif 164 orb->cmd.key = key >> 4; 165 /* issue "Start Subchannel" */ 166 orb->cmd.cpa = (__u32) __pa(cpa); 167 ccode = ssch(sch->schid, orb); 168 169 /* process condition code */ 170 CIO_HEX_EVENT(5, &ccode, sizeof(ccode)); 171 172 switch (ccode) { 173 case 0: 174 /* 175 * initialize device status information 176 */ 177 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 178 return 0; 179 case 1: /* status pending */ 180 case 2: /* busy */ 181 return -EBUSY; 182 case 3: /* device/path not operational */ 183 return cio_start_handle_notoper(sch, lpm); 184 default: 185 return ccode; 186 } 187} 188 189int 190cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) 191{ 192 return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); 193} 194 195/* 196 * resume suspended I/O operation 197 */ 198int 199cio_resume (struct subchannel *sch) 200{ 201 int ccode; 202 203 CIO_TRACE_EVENT(4, "resIO"); 204 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 205 206 ccode = rsch (sch->schid); 207 208 CIO_HEX_EVENT(4, &ccode, sizeof(ccode)); 209 210 switch (ccode) { 211 case 0: 212 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND; 213 return 0; 214 case 1: 215 return -EBUSY; 216 case 2: 217 return -EINVAL; 218 default: 219 /* 220 * useless to wait for request completion 221 * as device is no longer operational ! 222 */ 223 return -ENODEV; 224 } 225} 226 227/* 228 * halt I/O operation 229 */ 230int 231cio_halt(struct subchannel *sch) 232{ 233 int ccode; 234 235 if (!sch) 236 return -ENODEV; 237 238 CIO_TRACE_EVENT(2, "haltIO"); 239 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 240 241 /* 242 * Issue "Halt subchannel" and process condition code 243 */ 244 ccode = hsch (sch->schid); 245 246 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 247 248 switch (ccode) { 249 case 0: 250 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; 251 return 0; 252 case 1: /* status pending */ 253 case 2: /* busy */ 254 return -EBUSY; 255 default: /* device not operational */ 256 return -ENODEV; 257 } 258} 259 260/* 261 * Clear I/O operation 262 */ 263int 264cio_clear(struct subchannel *sch) 265{ 266 int ccode; 267 268 if (!sch) 269 return -ENODEV; 270 271 CIO_TRACE_EVENT(2, "clearIO"); 272 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 273 274 /* 275 * Issue "Clear subchannel" and process condition code 276 */ 277 ccode = csch (sch->schid); 278 279 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 280 281 switch (ccode) { 282 case 0: 283 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND; 284 return 0; 285 default: /* device not operational */ 286 return -ENODEV; 287 } 288} 289 290/* 291 * Function: cio_cancel 292 * Issues a "Cancel Subchannel" on the specified subchannel 293 * Note: We don't need any fancy intparms and flags here 294 * since xsch is executed synchronously. 295 * Only for common I/O internal use as for now. 296 */ 297int 298cio_cancel (struct subchannel *sch) 299{ 300 int ccode; 301 302 if (!sch) 303 return -ENODEV; 304 305 CIO_TRACE_EVENT(2, "cancelIO"); 306 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 307 308 ccode = xsch (sch->schid); 309 310 CIO_HEX_EVENT(2, &ccode, sizeof(ccode)); 311 312 switch (ccode) { 313 case 0: /* success */ 314 /* Update information in scsw. */ 315 if (cio_update_schib(sch)) 316 return -ENODEV; 317 return 0; 318 case 1: /* status pending */ 319 return -EBUSY; 320 case 2: /* not applicable */ 321 return -EINVAL; 322 default: /* not oper */ 323 return -ENODEV; 324 } 325} 326 327 328static void cio_apply_config(struct subchannel *sch, struct schib *schib) 329{ 330 schib->pmcw.intparm = sch->config.intparm; 331 schib->pmcw.mbi = sch->config.mbi; 332 schib->pmcw.isc = sch->config.isc; 333 schib->pmcw.ena = sch->config.ena; 334 schib->pmcw.mme = sch->config.mme; 335 schib->pmcw.mp = sch->config.mp; 336 schib->pmcw.csense = sch->config.csense; 337 schib->pmcw.mbfc = sch->config.mbfc; 338 if (sch->config.mbfc) 339 schib->mba = sch->config.mba; 340} 341 342static int cio_check_config(struct subchannel *sch, struct schib *schib) 343{ 344 return (schib->pmcw.intparm == sch->config.intparm) && 345 (schib->pmcw.mbi == sch->config.mbi) && 346 (schib->pmcw.isc == sch->config.isc) && 347 (schib->pmcw.ena == sch->config.ena) && 348 (schib->pmcw.mme == sch->config.mme) && 349 (schib->pmcw.mp == sch->config.mp) && 350 (schib->pmcw.csense == sch->config.csense) && 351 (schib->pmcw.mbfc == sch->config.mbfc) && 352 (!sch->config.mbfc || (schib->mba == sch->config.mba)); 353} 354 355/* 356 * cio_commit_config - apply configuration to the subchannel 357 */ 358int cio_commit_config(struct subchannel *sch) 359{ 360 struct schib schib; 361 int ccode, retry, ret = 0; 362 363 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 364 return -ENODEV; 365 366 for (retry = 0; retry < 5; retry++) { 367 /* copy desired changes to local schib */ 368 cio_apply_config(sch, &schib); 369 ccode = msch_err(sch->schid, &schib); 370 if (ccode < 0) /* -EIO if msch gets a program check. */ 371 return ccode; 372 switch (ccode) { 373 case 0: /* successful */ 374 if (stsch_err(sch->schid, &schib) || 375 !css_sch_is_valid(&schib)) 376 return -ENODEV; 377 if (cio_check_config(sch, &schib)) { 378 /* commit changes from local schib */ 379 memcpy(&sch->schib, &schib, sizeof(schib)); 380 return 0; 381 } 382 ret = -EAGAIN; 383 break; 384 case 1: /* status pending */ 385 return -EBUSY; 386 case 2: /* busy */ 387 udelay(100); /* allow for recovery */ 388 ret = -EBUSY; 389 break; 390 case 3: /* not operational */ 391 return -ENODEV; 392 } 393 } 394 return ret; 395} 396 397/** 398 * cio_update_schib - Perform stsch and update schib if subchannel is valid. 399 * @sch: subchannel on which to perform stsch 400 * Return zero on success, -ENODEV otherwise. 401 */ 402int cio_update_schib(struct subchannel *sch) 403{ 404 struct schib schib; 405 406 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) 407 return -ENODEV; 408 409 memcpy(&sch->schib, &schib, sizeof(schib)); 410 return 0; 411} 412EXPORT_SYMBOL_GPL(cio_update_schib); 413 414/** 415 * cio_enable_subchannel - enable a subchannel. 416 * @sch: subchannel to be enabled 417 * @intparm: interruption parameter to set 418 */ 419int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 420{ 421 int retry; 422 int ret; 423 424 CIO_TRACE_EVENT(2, "ensch"); 425 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 426 427 if (sch_is_pseudo_sch(sch)) 428 return -EINVAL; 429 if (cio_update_schib(sch)) 430 return -ENODEV; 431 432 sch->config.ena = 1; 433 sch->config.isc = sch->isc; 434 sch->config.intparm = intparm; 435 436 for (retry = 0; retry < 3; retry++) { 437 ret = cio_commit_config(sch); 438 if (ret == -EIO) { 439 /* 440 * Got a program check in msch. Try without 441 * the concurrent sense bit the next time. 442 */ 443 sch->config.csense = 0; 444 } else if (ret == -EBUSY) { 445 struct irb irb; 446 if (tsch(sch->schid, &irb) != 0) 447 break; 448 } else 449 break; 450 } 451 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 452 return ret; 453} 454EXPORT_SYMBOL_GPL(cio_enable_subchannel); 455 456/** 457 * cio_disable_subchannel - disable a subchannel. 458 * @sch: subchannel to disable 459 */ 460int cio_disable_subchannel(struct subchannel *sch) 461{ 462 int retry; 463 int ret; 464 465 CIO_TRACE_EVENT(2, "dissch"); 466 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 467 468 if (sch_is_pseudo_sch(sch)) 469 return 0; 470 if (cio_update_schib(sch)) 471 return -ENODEV; 472 473 sch->config.ena = 0; 474 475 for (retry = 0; retry < 3; retry++) { 476 ret = cio_commit_config(sch); 477 if (ret == -EBUSY) { 478 struct irb irb; 479 if (tsch(sch->schid, &irb) != 0) 480 break; 481 } else 482 break; 483 } 484 CIO_HEX_EVENT(2, &ret, sizeof(ret)); 485 return ret; 486} 487EXPORT_SYMBOL_GPL(cio_disable_subchannel); 488 489int cio_create_sch_lock(struct subchannel *sch) 490{ 491 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); 492 if (!sch->lock) 493 return -ENOMEM; 494 spin_lock_init(sch->lock); 495 return 0; 496} 497 498static int cio_check_devno_blacklisted(struct subchannel *sch) 499{ 500 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { 501 /* 502 * This device must not be known to Linux. So we simply 503 * say that there is no device and return ENODEV. 504 */ 505 CIO_MSG_EVENT(6, "Blacklisted device detected " 506 "at devno %04X, subchannel set %x\n", 507 sch->schib.pmcw.dev, sch->schid.ssid); 508 return -ENODEV; 509 } 510 return 0; 511} 512 513static int cio_validate_io_subchannel(struct subchannel *sch) 514{ 515 /* Initialization for io subchannels. */ 516 if (!css_sch_is_valid(&sch->schib)) 517 return -ENODEV; 518 519 /* Devno is valid. */ 520 return cio_check_devno_blacklisted(sch); 521} 522 523static int cio_validate_msg_subchannel(struct subchannel *sch) 524{ 525 /* Initialization for message subchannels. */ 526 if (!css_sch_is_valid(&sch->schib)) 527 return -ENODEV; 528 529 /* Devno is valid. */ 530 return cio_check_devno_blacklisted(sch); 531} 532 533/** 534 * cio_validate_subchannel - basic validation of subchannel 535 * @sch: subchannel structure to be filled out 536 * @schid: subchannel id 537 * 538 * Find out subchannel type and initialize struct subchannel. 539 * Return codes: 540 * 0 on success 541 * -ENXIO for non-defined subchannels 542 * -ENODEV for invalid subchannels or blacklisted devices 543 * -EIO for subchannels in an invalid subchannel set 544 */ 545int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) 546{ 547 char dbf_txt[15]; 548 int ccode; 549 int err; 550 551 sprintf(dbf_txt, "valsch%x", schid.sch_no); 552 CIO_TRACE_EVENT(4, dbf_txt); 553 554 /* Nuke all fields. */ 555 memset(sch, 0, sizeof(struct subchannel)); 556 557 sch->schid = schid; 558 if (cio_is_console(schid)) { 559 sch->lock = cio_get_console_lock(); 560 } else { 561 err = cio_create_sch_lock(sch); 562 if (err) 563 goto out; 564 } 565 mutex_init(&sch->reg_mutex); 566 567 /* 568 * The first subchannel that is not-operational (ccode==3) 569 * indicates that there aren't any more devices available. 570 * If stsch gets an exception, it means the current subchannel set 571 * is not valid. 572 */ 573 ccode = stsch_err (schid, &sch->schib); 574 if (ccode) { 575 err = (ccode == 3) ? -ENXIO : ccode; 576 goto out; 577 } 578 /* Copy subchannel type from path management control word. */ 579 sch->st = sch->schib.pmcw.st; 580 581 switch (sch->st) { 582 case SUBCHANNEL_TYPE_IO: 583 err = cio_validate_io_subchannel(sch); 584 break; 585 case SUBCHANNEL_TYPE_MSG: 586 err = cio_validate_msg_subchannel(sch); 587 break; 588 default: 589 err = 0; 590 } 591 if (err) 592 goto out; 593 594 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", 595 sch->schid.ssid, sch->schid.sch_no, sch->st); 596 return 0; 597out: 598 if (!cio_is_console(schid)) 599 kfree(sch->lock); 600 sch->lock = NULL; 601 return err; 602} 603 604/* 605 * do_IRQ() handles all normal I/O device IRQ's (the special 606 * SMP cross-CPU interrupts have their own specific 607 * handlers). 608 * 609 */ 610void __irq_entry do_IRQ(struct pt_regs *regs) 611{ 612 struct tpi_info *tpi_info; 613 struct subchannel *sch; 614 struct irb *irb; 615 struct pt_regs *old_regs; 616 617 old_regs = set_irq_regs(regs); 618 s390_idle_check(regs, S390_lowcore.int_clock, 619 S390_lowcore.async_enter_timer); 620 irq_enter(); 621 __get_cpu_var(s390_idle).nohz_delay = 1; 622 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 623 /* Serve timer interrupts first. */ 624 clock_comparator_work(); 625 /* 626 * Get interrupt information from lowcore 627 */ 628 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 629 irb = (struct irb *)&S390_lowcore.irb; 630 do { 631 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++; 632 /* 633 * Non I/O-subchannel thin interrupts are processed differently 634 */ 635 if (tpi_info->adapter_IO == 1 && 636 tpi_info->int_type == IO_INTERRUPT_TYPE) { 637 do_adapter_IO(tpi_info->isc); 638 continue; 639 } 640 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 641 if (!sch) { 642 /* Clear pending interrupt condition. */ 643 tsch(tpi_info->schid, irb); 644 continue; 645 } 646 spin_lock(sch->lock); 647 /* Store interrupt response block to lowcore. */ 648 if (tsch(tpi_info->schid, irb) == 0) { 649 /* Keep subchannel information word up to date. */ 650 memcpy (&sch->schib.scsw, &irb->scsw, 651 sizeof (irb->scsw)); 652 /* Call interrupt handler if there is one. */ 653 if (sch->driver && sch->driver->irq) 654 sch->driver->irq(sch); 655 } 656 spin_unlock(sch->lock); 657 /* 658 * Are more interrupts pending? 659 * If so, the tpi instruction will update the lowcore 660 * to hold the info for the next interrupt. 661 * We don't do this for VM because a tpi drops the cpu 662 * out of the sie which costs more cycles than it saves. 663 */ 664 } while (MACHINE_IS_LPAR && tpi(NULL) != 0); 665 irq_exit(); 666 set_irq_regs(old_regs); 667} 668 669#ifdef CONFIG_CCW_CONSOLE 670static struct subchannel console_subchannel; 671static struct io_subchannel_private console_priv; 672static int console_subchannel_in_use; 673 674/* 675 * Use tpi to get a pending interrupt, call the interrupt handler and 676 * return a pointer to the subchannel structure. 677 */ 678static int cio_tpi(void) 679{ 680 struct tpi_info *tpi_info; 681 struct subchannel *sch; 682 struct irb *irb; 683 int irq_context; 684 685 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 686 if (tpi(NULL) != 1) 687 return 0; 688 irb = (struct irb *)&S390_lowcore.irb; 689 /* Store interrupt response block to lowcore. */ 690 if (tsch(tpi_info->schid, irb) != 0) 691 /* Not status pending or not operational. */ 692 return 1; 693 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 694 if (!sch) 695 return 1; 696 irq_context = in_interrupt(); 697 if (!irq_context) 698 local_bh_disable(); 699 irq_enter(); 700 spin_lock(sch->lock); 701 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 702 if (sch->driver && sch->driver->irq) 703 sch->driver->irq(sch); 704 spin_unlock(sch->lock); 705 irq_exit(); 706 if (!irq_context) 707 _local_bh_enable(); 708 return 1; 709} 710 711void *cio_get_console_priv(void) 712{ 713 return &console_priv; 714} 715 716/* 717 * busy wait for the next interrupt on the console 718 */ 719void wait_cons_dev(void) 720 __releases(console_subchannel.lock) 721 __acquires(console_subchannel.lock) 722{ 723 unsigned long cr6 __attribute__ ((aligned (8))); 724 unsigned long save_cr6 __attribute__ ((aligned (8))); 725 726 /* 727 * before entering the spinlock we may already have 728 * processed the interrupt on a different CPU... 729 */ 730 if (!console_subchannel_in_use) 731 return; 732 733 /* disable all but the console isc */ 734 __ctl_store (save_cr6, 6, 6); 735 cr6 = 1UL << (31 - CONSOLE_ISC); 736 __ctl_load (cr6, 6, 6); 737 738 do { 739 spin_unlock(console_subchannel.lock); 740 if (!cio_tpi()) 741 cpu_relax(); 742 spin_lock(console_subchannel.lock); 743 } while (console_subchannel.schib.scsw.cmd.actl != 0); 744 /* 745 * restore previous isc value 746 */ 747 __ctl_load (save_cr6, 6, 6); 748} 749 750static int 751cio_test_for_console(struct subchannel_id schid, void *data) 752{ 753 if (stsch_err(schid, &console_subchannel.schib) != 0) 754 return -ENXIO; 755 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && 756 console_subchannel.schib.pmcw.dnv && 757 (console_subchannel.schib.pmcw.dev == console_devno)) { 758 console_irq = schid.sch_no; 759 return 1; /* found */ 760 } 761 return 0; 762} 763 764 765static int 766cio_get_console_sch_no(void) 767{ 768 struct subchannel_id schid; 769 770 init_subchannel_id(&schid); 771 if (console_irq != -1) { 772 /* VM provided us with the irq number of the console. */ 773 schid.sch_no = console_irq; 774 if (stsch_err(schid, &console_subchannel.schib) != 0 || 775 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || 776 !console_subchannel.schib.pmcw.dnv) 777 return -1; 778 console_devno = console_subchannel.schib.pmcw.dev; 779 } else if (console_devno != -1) { 780 /* At least the console device number is known. */ 781 for_each_subchannel(cio_test_for_console, NULL); 782 if (console_irq == -1) 783 return -1; 784 } else { 785 /* unlike in 2.4, we cannot autoprobe here, since 786 * the channel subsystem is not fully initialized. 787 * With some luck, the HWC console can take over */ 788 return -1; 789 } 790 return console_irq; 791} 792 793struct subchannel * 794cio_probe_console(void) 795{ 796 int sch_no, ret; 797 struct subchannel_id schid; 798 799 if (xchg(&console_subchannel_in_use, 1) != 0) 800 return ERR_PTR(-EBUSY); 801 sch_no = cio_get_console_sch_no(); 802 if (sch_no == -1) { 803 console_subchannel_in_use = 0; 804 pr_warning("No CCW console was found\n"); 805 return ERR_PTR(-ENODEV); 806 } 807 memset(&console_subchannel, 0, sizeof(struct subchannel)); 808 init_subchannel_id(&schid); 809 schid.sch_no = sch_no; 810 ret = cio_validate_subchannel(&console_subchannel, schid); 811 if (ret) { 812 console_subchannel_in_use = 0; 813 return ERR_PTR(-ENODEV); 814 } 815 816 /* 817 * enable console I/O-interrupt subclass 818 */ 819 isc_register(CONSOLE_ISC); 820 console_subchannel.config.isc = CONSOLE_ISC; 821 console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; 822 ret = cio_commit_config(&console_subchannel); 823 if (ret) { 824 isc_unregister(CONSOLE_ISC); 825 console_subchannel_in_use = 0; 826 return ERR_PTR(ret); 827 } 828 return &console_subchannel; 829} 830 831void 832cio_release_console(void) 833{ 834 console_subchannel.config.intparm = 0; 835 cio_commit_config(&console_subchannel); 836 isc_unregister(CONSOLE_ISC); 837 console_subchannel_in_use = 0; 838} 839 840/* Bah... hack to catch console special sausages. */ 841int 842cio_is_console(struct subchannel_id schid) 843{ 844 if (!console_subchannel_in_use) 845 return 0; 846 return schid_equal(&schid, &console_subchannel.schid); 847} 848 849struct subchannel * 850cio_get_console_subchannel(void) 851{ 852 if (!console_subchannel_in_use) 853 return NULL; 854 return &console_subchannel; 855} 856 857#endif 858static int 859__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 860{ 861 int retry, cc; 862 863 cc = 0; 864 for (retry=0;retry<3;retry++) { 865 schib->pmcw.ena = 0; 866 cc = msch_err(schid, schib); 867 if (cc) 868 return (cc==3?-ENODEV:-EBUSY); 869 if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) 870 return -ENODEV; 871 if (!schib->pmcw.ena) 872 return 0; 873 } 874 return -EBUSY; /* uhm... */ 875} 876 877static int 878__clear_io_subchannel_easy(struct subchannel_id schid) 879{ 880 int retry; 881 882 if (csch(schid)) 883 return -ENODEV; 884 for (retry=0;retry<20;retry++) { 885 struct tpi_info ti; 886 887 if (tpi(&ti)) { 888 tsch(ti.schid, (struct irb *)&S390_lowcore.irb); 889 if (schid_equal(&ti.schid, &schid)) 890 return 0; 891 } 892 udelay_simple(100); 893 } 894 return -EBUSY; 895} 896 897static void __clear_chsc_subchannel_easy(void) 898{ 899 /* It seems we can only wait for a bit here :/ */ 900 udelay_simple(100); 901} 902 903static int pgm_check_occured; 904 905static void cio_reset_pgm_check_handler(void) 906{ 907 pgm_check_occured = 1; 908} 909 910static int stsch_reset(struct subchannel_id schid, struct schib *addr) 911{ 912 int rc; 913 914 pgm_check_occured = 0; 915 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 916 rc = stsch_err(schid, addr); 917 s390_base_pgm_handler_fn = NULL; 918 919 /* The program check handler could have changed pgm_check_occured. */ 920 barrier(); 921 922 if (pgm_check_occured) 923 return -EIO; 924 else 925 return rc; 926} 927 928static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) 929{ 930 struct schib schib; 931 932 if (stsch_reset(schid, &schib)) 933 return -ENXIO; 934 if (!schib.pmcw.ena) 935 return 0; 936 switch(__disable_subchannel_easy(schid, &schib)) { 937 case 0: 938 case -ENODEV: 939 break; 940 default: /* -EBUSY */ 941 switch (schib.pmcw.st) { 942 case SUBCHANNEL_TYPE_IO: 943 if (__clear_io_subchannel_easy(schid)) 944 goto out; /* give up... */ 945 break; 946 case SUBCHANNEL_TYPE_CHSC: 947 __clear_chsc_subchannel_easy(); 948 break; 949 default: 950 /* No default clear strategy */ 951 break; 952 } 953 stsch_err(schid, &schib); 954 __disable_subchannel_easy(schid, &schib); 955 } 956out: 957 return 0; 958} 959 960static atomic_t chpid_reset_count; 961 962static void s390_reset_chpids_mcck_handler(void) 963{ 964 struct crw crw; 965 struct mci *mci; 966 967 /* Check for pending channel report word. */ 968 mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 969 if (!mci->cp) 970 return; 971 /* Process channel report words. */ 972 while (stcrw(&crw) == 0) { 973 /* Check for responses to RCHP. */ 974 if (crw.slct && crw.rsc == CRW_RSC_CPATH) 975 atomic_dec(&chpid_reset_count); 976 } 977} 978 979#define RCHP_TIMEOUT (30 * USEC_PER_SEC) 980static void css_reset(void) 981{ 982 int i, ret; 983 unsigned long long timeout; 984 struct chp_id chpid; 985 986 /* Reset subchannels. */ 987 for_each_subchannel(__shutdown_subchannel_easy, NULL); 988 /* Reset channel paths. */ 989 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; 990 /* Enable channel report machine checks. */ 991 __ctl_set_bit(14, 28); 992 /* Temporarily reenable machine checks. */ 993 local_mcck_enable(); 994 chp_id_init(&chpid); 995 for (i = 0; i <= __MAX_CHPID; i++) { 996 chpid.id = i; 997 ret = rchp(chpid); 998 if ((ret == 0) || (ret == 2)) 999 /* 1000 * rchp either succeeded, or another rchp is already 1001 * in progress. In either case, we'll get a crw. 1002 */ 1003 atomic_inc(&chpid_reset_count); 1004 } 1005 /* Wait for machine check for all channel paths. */ 1006 timeout = get_clock() + (RCHP_TIMEOUT << 12); 1007 while (atomic_read(&chpid_reset_count) != 0) { 1008 if (get_clock() > timeout) 1009 break; 1010 cpu_relax(); 1011 } 1012 /* Disable machine checks again. */ 1013 local_mcck_disable(); 1014 /* Disable channel report machine checks. */ 1015 __ctl_clear_bit(14, 28); 1016 s390_base_mcck_handler_fn = NULL; 1017} 1018 1019static struct reset_call css_reset_call = { 1020 .fn = css_reset, 1021}; 1022 1023static int __init init_css_reset_call(void) 1024{ 1025 atomic_set(&chpid_reset_count, 0); 1026 register_reset_call(&css_reset_call); 1027 return 0; 1028} 1029 1030arch_initcall(init_css_reset_call); 1031 1032struct sch_match_id { 1033 struct subchannel_id schid; 1034 struct ccw_dev_id devid; 1035 int rc; 1036}; 1037 1038static int __reipl_subchannel_match(struct subchannel_id schid, void *data) 1039{ 1040 struct schib schib; 1041 struct sch_match_id *match_id = data; 1042 1043 if (stsch_reset(schid, &schib)) 1044 return -ENXIO; 1045 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && 1046 (schib.pmcw.dev == match_id->devid.devno) && 1047 (schid.ssid == match_id->devid.ssid)) { 1048 match_id->schid = schid; 1049 match_id->rc = 0; 1050 return 1; 1051 } 1052 return 0; 1053} 1054 1055static int reipl_find_schid(struct ccw_dev_id *devid, 1056 struct subchannel_id *schid) 1057{ 1058 struct sch_match_id match_id; 1059 1060 match_id.devid = *devid; 1061 match_id.rc = -ENODEV; 1062 for_each_subchannel(__reipl_subchannel_match, &match_id); 1063 if (match_id.rc == 0) 1064 *schid = match_id.schid; 1065 return match_id.rc; 1066} 1067 1068extern void do_reipl_asm(__u32 schid); 1069 1070/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1071void reipl_ccw_dev(struct ccw_dev_id *devid) 1072{ 1073 struct subchannel_id schid; 1074 1075 s390_reset_system(); 1076 if (reipl_find_schid(devid, &schid) != 0) 1077 panic("IPL Device not found\n"); 1078 do_reipl_asm(*((__u32*)&schid)); 1079} 1080 1081int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) 1082{ 1083 struct subchannel_id schid; 1084 struct schib schib; 1085 1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; 1087 if (!schid.one) 1088 return -ENODEV; 1089 if (stsch_err(schid, &schib)) 1090 return -ENODEV; 1091 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 1092 return -ENODEV; 1093 if (!schib.pmcw.dnv) 1094 return -ENODEV; 1095 iplinfo->devno = schib.pmcw.dev; 1096 iplinfo->is_qdio = schib.pmcw.qf; 1097 return 0; 1098} 1099 1100/** 1101 * cio_tm_start_key - perform start function 1102 * @sch: subchannel on which to perform the start function 1103 * @tcw: transport-command word to be started 1104 * @lpm: mask of paths to use 1105 * @key: storage key to use for storage access 1106 * 1107 * Start the tcw on the given subchannel. Return zero on success, non-zero 1108 * otherwise. 1109 */ 1110int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) 1111{ 1112 int cc; 1113 union orb *orb = &to_io_private(sch)->orb; 1114 1115 memset(orb, 0, sizeof(union orb)); 1116 orb->tm.intparm = (u32) (addr_t) sch; 1117 orb->tm.key = key >> 4; 1118 orb->tm.b = 1; 1119 orb->tm.lpm = lpm ? lpm : sch->lpm; 1120 orb->tm.tcw = (u32) (addr_t) tcw; 1121 cc = ssch(sch->schid, orb); 1122 switch (cc) { 1123 case 0: 1124 return 0; 1125 case 1: 1126 case 2: 1127 return -EBUSY; 1128 default: 1129 return cio_start_handle_notoper(sch, lpm); 1130 } 1131} 1132 1133/** 1134 * cio_tm_intrg - perform interrogate function 1135 * @sch - subchannel on which to perform the interrogate function 1136 * 1137 * If the specified subchannel is running in transport-mode, perform the 1138 * interrogate function. Return zero on success, non-zero otherwie. 1139 */ 1140int cio_tm_intrg(struct subchannel *sch) 1141{ 1142 int cc; 1143 1144 if (!to_io_private(sch)->orb.tm.b) 1145 return -EINVAL; 1146 cc = xsch(sch->schid); 1147 switch (cc) { 1148 case 0: 1149 case 2: 1150 return 0; 1151 case 1: 1152 return -EBUSY; 1153 default: 1154 return -ENODEV; 1155 } 1156} 1157