/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static DEFINE_SPINLOCK(siosl_lock);
static DEFINE_SPINLOCK(sda_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

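/*
 * Usage sketch (hypothetical caller, not taken from this file): the
 * subchannel code can cache the path information like this, where
 * use_chpid() stands in for whatever the caller does per channel path:
 *
 *	struct chsc_ssd_info ssd;
 *	int i;
 *
 *	if (chsc_get_ssd_info(sch->schid, &ssd) == 0)
 *		for (i = 0; i < 8; i++)
 *			if (ssd.path_mask & (0x80 >> i))
 *				use_chpid(&ssd.chpid[i]);
 */
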
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may now be available to it we have to do recognition
	 * again. Since we don't know which chpid the device
	 * may be on, we have to do a stsch on all devices.
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

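/*
 * Overview of the event machinery below: a channel report word (CRW)
 * with the channel-subsystem reporting-source code is routed to
 * chsc_process_crw(), which repeatedly issues the store-event-information
 * (SEI) command and dispatches each stored event by content code:
 * 1 = link incident, 2 = resource accessibility, 8 = channel-path
 * configuration.
 */
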
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	if (data->op > 2) {
		/* Unknown operation code - don't index events[] with it. */
		CIO_CRW_EVENT(2, "chsc: invalid configuration operation %d\n",
			      data->op);
		return;
	}
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

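/*
 * Illustration (assumes the chp_test_bit() helper from chp.h addresses
 * the bitmap MSB first): CHPID 0 corresponds to bit 0x80 of map[0],
 * CHPID 9 to bit 0x40 of map[1], and so on, which is why the loop above
 * can walk CHPIDs 0..__MAX_CHPID linearly.
 */
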
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/*
	 * Access to sei_page is serialized through the machine check
	 * handler thread, so no need for locking.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/* Redo PathVerification on the devices the chpid connects to. */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &link);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &link);

	return 0;
}

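/*
 * Hypothetical example: the sysfs interface in chp.c could vary a
 * channel path offline roughly like this (the CHPID value is made up):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x17;
 *	chsc_chp_vary(chpid, 0);
 */
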
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}

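/*
 * Sketch of the enable/disable flow implemented by chsc_secm() below:
 * on enable it allocates the two channel-utilization-block pages
 * (cub_addr1/2), issues the secm command via __chsc_do_secm() and adds
 * the per-CHPID measurement attributes; any failure rolls back to the
 * disabled state and releases the pages again.
 */
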
int chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m,
				     struct chsc_response_struct *resp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpd_area;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(resp, &scpd_area->response, scpd_area->response.length);
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

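/*
 * chsc_determine_base_channel_path_desc() below is the common wrapper:
 * it requests the plain format-0 description (fmt, rfmt, c and m all
 * zero) and copies just the channel-path descriptor out of the response.
 */
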
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	int ret;

	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
	if (!chsc_resp)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
	if (ret)
		goto out_free;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out_free:
	kfree(chsc_resp);
	return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

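/*
 * Note on chsc_initialize_cmg_chars() above: mask = 0x80 >> (i + 3)
 * tests bit 3 and following of the characteristics-validity field
 * (cmcv is a 5-bit value), so a measurement characteristic is copied
 * only when its validity bit is set and reads as zero otherwise.
 */
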
int __init chsc_alloc_sei_area(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page) {
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
		return -ENOMEM;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_free_sei_area(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	int ret;
	static struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed, aligned(4096))) sda_area;

	spin_lock(&sda_lock);
	memset(&sda_area, 0, sizeof(sda_area));
	sda_area.request.length = 0x0400;
	sda_area.request.code = 0x0031;
	sda_area.operation_code = operation_code;

	ret = chsc(&sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area.response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area.response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area.response.code);
out:
	spin_unlock(&sda_lock);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

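/*
 * The two helpers below are used by the server-time-protocol (STP) code:
 * chsc_sstpc() issues an STP control command (request code 0x0033) and
 * chsc_sstpi() stores STP information (0x0038) into a caller-provided
 * buffer. Both expect a 4K page supplied by the caller, which they clear
 * before use.
 */
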
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

static struct {
	struct chsc_header request;
	u32 word1;
	struct subchannel_id sid;
	u32 word3;
	struct chsc_header response;
	u32 word[11];
} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));

int chsc_siosl(struct subchannel_id schid)
{
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&siosl_lock, flags);
	memset(&siosl_area, 0, sizeof(siosl_area));
	siosl_area.request.length = 0x0010;
	siosl_area.request.code = 0x0046;
	siosl_area.word1 = 0x80000000;
	siosl_area.sid = schid;

	ccode = chsc(&siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area.response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area.response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&siosl_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

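/*
 * Hypothetical example for chsc_siosl(): a driver that wants the channel
 * subsystem to collect log data for one of its subchannels could call
 *
 *	if (chsc_siosl(sch->schid))
 *		dev_warn(&sch->dev, "could not initiate logging\n");
 *
 * on an I/O error, assuming sch is its struct subchannel.
 */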