1/* 2 * linux/drivers/s390/cio/qdio_main.c 3 * 4 * Linux for s390 qdio support, buffer handling, qdio API and module support. 5 * 6 * Copyright 2000,2008 IBM Corp. 7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 8 * Jan Glauber <jang@linux.vnet.ibm.com> 9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 10 */ 11#include <linux/module.h> 12#include <linux/init.h> 13#include <linux/kernel.h> 14#include <linux/timer.h> 15#include <linux/delay.h> 16#include <linux/gfp.h> 17#include <asm/atomic.h> 18#include <asm/debug.h> 19#include <asm/qdio.h> 20 21#include "cio.h" 22#include "css.h" 23#include "device.h" 24#include "qdio.h" 25#include "qdio_debug.h" 26 27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 28 "Jan Glauber <jang@linux.vnet.ibm.com>"); 29MODULE_DESCRIPTION("QDIO base support"); 30MODULE_LICENSE("GPL"); 31 32static inline int do_siga_sync(unsigned long schid, 33 unsigned int out_mask, unsigned int in_mask, 34 unsigned int fc) 35{ 36 register unsigned long __fc asm ("0") = fc; 37 register unsigned long __schid asm ("1") = schid; 38 register unsigned long out asm ("2") = out_mask; 39 register unsigned long in asm ("3") = in_mask; 40 int cc; 41 42 asm volatile( 43 " siga 0\n" 44 " ipm %0\n" 45 " srl %0,28\n" 46 : "=d" (cc) 47 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); 48 return cc; 49} 50 51static inline int do_siga_input(unsigned long schid, unsigned int mask, 52 unsigned int fc) 53{ 54 register unsigned long __fc asm ("0") = fc; 55 register unsigned long __schid asm ("1") = schid; 56 register unsigned long __mask asm ("2") = mask; 57 int cc; 58 59 asm volatile( 60 " siga 0\n" 61 " ipm %0\n" 62 " srl %0,28\n" 63 : "=d" (cc) 64 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory"); 65 return cc; 66} 67 68/** 69 * do_siga_output - perform SIGA-w/wt function 70 * @schid: subchannel id or in case of QEBSM the subchannel token 71 * @mask: which output queues to process 72 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer 73 * @fc: function code to perform 74 * 75 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION. 76 * Note: For IQDC unicast queues only the highest priority queue is processed. 77 */ 78static inline int do_siga_output(unsigned long schid, unsigned long mask, 79 unsigned int *bb, unsigned int fc) 80{ 81 register unsigned long __fc asm("0") = fc; 82 register unsigned long __schid asm("1") = schid; 83 register unsigned long __mask asm("2") = mask; 84 int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION; 85 86 asm volatile( 87 " siga 0\n" 88 "0: ipm %0\n" 89 " srl %0,28\n" 90 "1:\n" 91 EX_TABLE(0b, 1b) 92 : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) 93 : : "cc", "memory"); 94 *bb = ((unsigned int) __fc) >> 31; 95 return cc; 96} 97 98static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 99{ 100 /* all done or next buffer state different */ 101 if (ccq == 0 || ccq == 32) 102 return 0; 103 /* not all buffers processed */ 104 if (ccq == 96 || ccq == 97) 105 return 1; 106 /* notify devices immediately */ 107 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 108 return -EIO; 109} 110 111/** 112 * qdio_do_eqbs - extract buffer states for QEBSM 113 * @q: queue to manipulate 114 * @state: state of the extracted buffers 115 * @start: buffer number to start at 116 * @count: count of buffers to examine 117 * @auto_ack: automatically acknowledge buffers 118 * 119 * Returns the number of successfully extracted equal buffer states. 120 * Stops processing if a state is different from the last buffers state. 121 */ 122static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 123 int start, int count, int auto_ack) 124{ 125 unsigned int ccq = 0; 126 int tmp_count = count, tmp_start = start; 127 int nr = q->nr; 128 int rc; 129 130 BUG_ON(!q->irq_ptr->sch_token); 131 qperf_inc(q, eqbs); 132 133 if (!q->is_input_q) 134 nr += q->irq_ptr->nr_input_qs; 135again: 136 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, 137 auto_ack); 138 rc = qdio_check_ccq(q, ccq); 139 140 /* At least one buffer was processed, return and extract the remaining 141 * buffers later. 142 */ 143 if ((ccq == 96) && (count != tmp_count)) { 144 qperf_inc(q, eqbs_partial); 145 return (count - tmp_count); 146 } 147 148 if (rc == 1) { 149 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); 150 goto again; 151 } 152 153 if (rc < 0) { 154 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 155 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 156 q->handler(q->irq_ptr->cdev, 157 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 158 0, -1, -1, q->irq_ptr->int_parm); 159 return 0; 160 } 161 return count - tmp_count; 162} 163 164/** 165 * qdio_do_sqbs - set buffer states for QEBSM 166 * @q: queue to manipulate 167 * @state: new state of the buffers 168 * @start: first buffer number to change 169 * @count: how many buffers to change 170 * 171 * Returns the number of successfully changed buffers. 172 * Does retrying until the specified count of buffer states is set or an 173 * error occurs. 174 */ 175static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, 176 int count) 177{ 178 unsigned int ccq = 0; 179 int tmp_count = count, tmp_start = start; 180 int nr = q->nr; 181 int rc; 182 183 if (!count) 184 return 0; 185 186 BUG_ON(!q->irq_ptr->sch_token); 187 qperf_inc(q, sqbs); 188 189 if (!q->is_input_q) 190 nr += q->irq_ptr->nr_input_qs; 191again: 192 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 193 rc = qdio_check_ccq(q, ccq); 194 if (rc == 1) { 195 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 196 qperf_inc(q, sqbs_partial); 197 goto again; 198 } 199 if (rc < 0) { 200 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); 201 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 202 q->handler(q->irq_ptr->cdev, 203 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 204 0, -1, -1, q->irq_ptr->int_parm); 205 return 0; 206 } 207 WARN_ON(tmp_count); 208 return count - tmp_count; 209} 210 211/* returns number of examined buffers and their common state in *state */ 212static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, 213 unsigned char *state, unsigned int count, 214 int auto_ack) 215{ 216 unsigned char __state = 0; 217 int i; 218 219 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); 220 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); 221 222 if (is_qebsm(q)) 223 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); 224 225 for (i = 0; i < count; i++) { 226 if (!__state) 227 __state = q->slsb.val[bufnr]; 228 else if (q->slsb.val[bufnr] != __state) 229 break; 230 bufnr = next_buf(bufnr); 231 } 232 *state = __state; 233 return i; 234} 235 236static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 237 unsigned char *state, int auto_ack) 238{ 239 return get_buf_states(q, bufnr, state, 1, auto_ack); 240} 241 242/* wrap-around safe setting of slsb states, returns number of changed buffers */ 243static inline int set_buf_states(struct qdio_q *q, int bufnr, 244 unsigned char state, int count) 245{ 246 int i; 247 248 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); 249 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); 250 251 if (is_qebsm(q)) 252 return qdio_do_sqbs(q, state, bufnr, count); 253 254 for (i = 0; i < count; i++) { 255 xchg(&q->slsb.val[bufnr], state); 256 bufnr = next_buf(bufnr); 257 } 258 return count; 259} 260 261static inline int set_buf_state(struct qdio_q *q, int bufnr, 262 unsigned char state) 263{ 264 return set_buf_states(q, bufnr, state, 1); 265} 266 267/* set slsb states to initial state */ 268void qdio_init_buf_states(struct qdio_irq *irq_ptr) 269{ 270 struct qdio_q *q; 271 int i; 272 273 for_each_input_queue(irq_ptr, q, i) 274 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, 275 QDIO_MAX_BUFFERS_PER_Q); 276 for_each_output_queue(irq_ptr, q, i) 277 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, 278 QDIO_MAX_BUFFERS_PER_Q); 279} 280 281static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 282 unsigned int input) 283{ 284 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 285 unsigned int fc = QDIO_SIGA_SYNC; 286 int cc; 287 288 if (!need_siga_sync(q)) 289 return 0; 290 291 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 292 qperf_inc(q, siga_sync); 293 294 if (is_qebsm(q)) { 295 schid = q->irq_ptr->sch_token; 296 fc |= QDIO_SIGA_QEBSM_FLAG; 297 } 298 299 cc = do_siga_sync(schid, output, input, fc); 300 if (cc) 301 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 302 return cc; 303} 304 305static inline int qdio_siga_sync_q(struct qdio_q *q) 306{ 307 if (q->is_input_q) 308 return qdio_siga_sync(q, 0, q->mask); 309 else 310 return qdio_siga_sync(q, q->mask, 0); 311} 312 313static inline int qdio_siga_sync_out(struct qdio_q *q) 314{ 315 return qdio_siga_sync(q, ~0U, 0); 316} 317 318static inline int qdio_siga_sync_all(struct qdio_q *q) 319{ 320 return qdio_siga_sync(q, ~0U, ~0U); 321} 322 323static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) 324{ 325 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 326 unsigned int fc = QDIO_SIGA_WRITE; 327 u64 start_time = 0; 328 int cc; 329 330 if (q->u.out.use_enh_siga) 331 fc = 3; 332 333 if (is_qebsm(q)) { 334 schid = q->irq_ptr->sch_token; 335 fc |= QDIO_SIGA_QEBSM_FLAG; 336 } 337again: 338 cc = do_siga_output(schid, q->mask, busy_bit, fc); 339 340 /* hipersocket busy condition */ 341 if (*busy_bit) { 342 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 343 344 if (!start_time) { 345 start_time = get_clock(); 346 goto again; 347 } 348 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) 349 goto again; 350 } 351 return cc; 352} 353 354static inline int qdio_siga_input(struct qdio_q *q) 355{ 356 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 357 unsigned int fc = QDIO_SIGA_READ; 358 int cc; 359 360 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 361 qperf_inc(q, siga_read); 362 363 if (is_qebsm(q)) { 364 schid = q->irq_ptr->sch_token; 365 fc |= QDIO_SIGA_QEBSM_FLAG; 366 } 367 368 cc = do_siga_input(schid, q->mask, fc); 369 if (cc) 370 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 371 return cc; 372} 373 374static inline void qdio_sync_after_thinint(struct qdio_q *q) 375{ 376 if (pci_out_supported(q)) { 377 if (need_siga_sync_thinint(q)) 378 qdio_siga_sync_all(q); 379 else if (need_siga_sync_out_thinint(q)) 380 qdio_siga_sync_out(q); 381 } else 382 qdio_siga_sync_q(q); 383} 384 385int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 386 unsigned char *state) 387{ 388 qdio_siga_sync_q(q); 389 return get_buf_states(q, bufnr, state, 1, 0); 390} 391 392static inline void qdio_stop_polling(struct qdio_q *q) 393{ 394 if (!q->u.in.polling) 395 return; 396 397 q->u.in.polling = 0; 398 qperf_inc(q, stop_polling); 399 400 /* show the card that we are not polling anymore */ 401 if (is_qebsm(q)) { 402 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, 403 q->u.in.ack_count); 404 q->u.in.ack_count = 0; 405 } else 406 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 407} 408 409static inline void account_sbals(struct qdio_q *q, int count) 410{ 411 int pos = 0; 412 413 q->q_stats.nr_sbal_total += count; 414 if (count == QDIO_MAX_BUFFERS_MASK) { 415 q->q_stats.nr_sbals[7]++; 416 return; 417 } 418 while (count >>= 1) 419 pos++; 420 q->q_stats.nr_sbals[pos]++; 421} 422 423static void announce_buffer_error(struct qdio_q *q, int count) 424{ 425 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 426 427 /* special handling for no target buffer empty */ 428 if ((!q->is_input_q && 429 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 430 qperf_inc(q, target_full); 431 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 432 q->first_to_check); 433 return; 434 } 435 436 DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); 437 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 438 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 439 DBF_ERROR("F14:%2x F15:%2x", 440 q->sbal[q->first_to_check]->element[14].flags & 0xff, 441 q->sbal[q->first_to_check]->element[15].flags & 0xff); 442} 443 444static inline void inbound_primed(struct qdio_q *q, int count) 445{ 446 int new; 447 448 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count); 449 450 /* for QEBSM the ACK was already set by EQBS */ 451 if (is_qebsm(q)) { 452 if (!q->u.in.polling) { 453 q->u.in.polling = 1; 454 q->u.in.ack_count = count; 455 q->u.in.ack_start = q->first_to_check; 456 return; 457 } 458 459 /* delete the previous ACK's */ 460 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, 461 q->u.in.ack_count); 462 q->u.in.ack_count = count; 463 q->u.in.ack_start = q->first_to_check; 464 return; 465 } 466 467 /* 468 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling 469 * or by the next inbound run. 470 */ 471 new = add_buf(q->first_to_check, count - 1); 472 if (q->u.in.polling) { 473 /* reset the previous ACK but first set the new one */ 474 set_buf_state(q, new, SLSB_P_INPUT_ACK); 475 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 476 } else { 477 q->u.in.polling = 1; 478 set_buf_state(q, new, SLSB_P_INPUT_ACK); 479 } 480 481 q->u.in.ack_start = new; 482 count--; 483 if (!count) 484 return; 485 /* need to change ALL buffers to get more interrupts */ 486 set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); 487} 488 489static int get_inbound_buffer_frontier(struct qdio_q *q) 490{ 491 int count, stop; 492 unsigned char state; 493 494 /* 495 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 496 * would return 0. 497 */ 498 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 499 stop = add_buf(q->first_to_check, count); 500 501 if (q->first_to_check == stop) 502 goto out; 503 504 /* 505 * No siga sync here, as a PCI or we after a thin interrupt 506 * already sync'ed the queues. 507 */ 508 count = get_buf_states(q, q->first_to_check, &state, count, 1); 509 if (!count) 510 goto out; 511 512 switch (state) { 513 case SLSB_P_INPUT_PRIMED: 514 inbound_primed(q, count); 515 q->first_to_check = add_buf(q->first_to_check, count); 516 if (atomic_sub(count, &q->nr_buf_used) == 0) 517 qperf_inc(q, inbound_queue_full); 518 if (q->irq_ptr->perf_stat_enabled) 519 account_sbals(q, count); 520 break; 521 case SLSB_P_INPUT_ERROR: 522 announce_buffer_error(q, count); 523 /* process the buffer, the upper layer will take care of it */ 524 q->first_to_check = add_buf(q->first_to_check, count); 525 atomic_sub(count, &q->nr_buf_used); 526 if (q->irq_ptr->perf_stat_enabled) 527 account_sbals_error(q, count); 528 break; 529 case SLSB_CU_INPUT_EMPTY: 530 case SLSB_P_INPUT_NOT_INIT: 531 case SLSB_P_INPUT_ACK: 532 if (q->irq_ptr->perf_stat_enabled) 533 q->q_stats.nr_sbal_nop++; 534 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); 535 break; 536 default: 537 BUG(); 538 } 539out: 540 return q->first_to_check; 541} 542 543static int qdio_inbound_q_moved(struct qdio_q *q) 544{ 545 int bufnr; 546 547 bufnr = get_inbound_buffer_frontier(q); 548 549 if ((bufnr != q->last_move) || q->qdio_error) { 550 q->last_move = bufnr; 551 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 552 q->u.in.timestamp = get_clock(); 553 return 1; 554 } else 555 return 0; 556} 557 558static inline int qdio_inbound_q_done(struct qdio_q *q) 559{ 560 unsigned char state = 0; 561 562 if (!atomic_read(&q->nr_buf_used)) 563 return 1; 564 565 qdio_siga_sync_q(q); 566 get_buf_state(q, q->first_to_check, &state, 0); 567 568 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 569 /* more work coming */ 570 return 0; 571 572 if (is_thinint_irq(q->irq_ptr)) 573 return 1; 574 575 /* don't poll under z/VM */ 576 if (MACHINE_IS_VM) 577 return 1; 578 579 /* 580 * At this point we know, that inbound first_to_check 581 * has (probably) not moved (see qdio_inbound_processing). 582 */ 583 if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 584 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 585 q->first_to_check); 586 return 1; 587 } else 588 return 0; 589} 590 591static void qdio_kick_handler(struct qdio_q *q) 592{ 593 int start = q->first_to_kick; 594 int end = q->first_to_check; 595 int count; 596 597 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 598 return; 599 600 count = sub_buf(end, start); 601 602 if (q->is_input_q) { 603 qperf_inc(q, inbound_handler); 604 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); 605 } else { 606 qperf_inc(q, outbound_handler); 607 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", 608 start, count); 609 } 610 611 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, 612 q->irq_ptr->int_parm); 613 614 /* for the next time */ 615 q->first_to_kick = end; 616 q->qdio_error = 0; 617} 618 619static void __qdio_inbound_processing(struct qdio_q *q) 620{ 621 qperf_inc(q, tasklet_inbound); 622 623 if (!qdio_inbound_q_moved(q)) 624 return; 625 626 qdio_kick_handler(q); 627 628 if (!qdio_inbound_q_done(q)) { 629 /* means poll time is not yet over */ 630 qperf_inc(q, tasklet_inbound_resched); 631 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 632 tasklet_schedule(&q->tasklet); 633 return; 634 } 635 } 636 637 qdio_stop_polling(q); 638 /* 639 * We need to check again to not lose initiative after 640 * resetting the ACK state. 641 */ 642 if (!qdio_inbound_q_done(q)) { 643 qperf_inc(q, tasklet_inbound_resched2); 644 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 645 tasklet_schedule(&q->tasklet); 646 } 647} 648 649void qdio_inbound_processing(unsigned long data) 650{ 651 struct qdio_q *q = (struct qdio_q *)data; 652 __qdio_inbound_processing(q); 653} 654 655static int get_outbound_buffer_frontier(struct qdio_q *q) 656{ 657 int count, stop; 658 unsigned char state; 659 660 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 661 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 662 qdio_siga_sync_q(q); 663 664 /* 665 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 666 * would return 0. 667 */ 668 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 669 stop = add_buf(q->first_to_check, count); 670 671 if (q->first_to_check == stop) 672 return q->first_to_check; 673 674 count = get_buf_states(q, q->first_to_check, &state, count, 0); 675 if (!count) 676 return q->first_to_check; 677 678 switch (state) { 679 case SLSB_P_OUTPUT_EMPTY: 680 /* the adapter got it */ 681 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count); 682 683 atomic_sub(count, &q->nr_buf_used); 684 q->first_to_check = add_buf(q->first_to_check, count); 685 if (q->irq_ptr->perf_stat_enabled) 686 account_sbals(q, count); 687 break; 688 case SLSB_P_OUTPUT_ERROR: 689 announce_buffer_error(q, count); 690 /* process the buffer, the upper layer will take care of it */ 691 q->first_to_check = add_buf(q->first_to_check, count); 692 atomic_sub(count, &q->nr_buf_used); 693 if (q->irq_ptr->perf_stat_enabled) 694 account_sbals_error(q, count); 695 break; 696 case SLSB_CU_OUTPUT_PRIMED: 697 /* the adapter has not fetched the output yet */ 698 if (q->irq_ptr->perf_stat_enabled) 699 q->q_stats.nr_sbal_nop++; 700 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); 701 break; 702 case SLSB_P_OUTPUT_NOT_INIT: 703 case SLSB_P_OUTPUT_HALTED: 704 break; 705 default: 706 BUG(); 707 } 708 return q->first_to_check; 709} 710 711/* all buffers processed? */ 712static inline int qdio_outbound_q_done(struct qdio_q *q) 713{ 714 return atomic_read(&q->nr_buf_used) == 0; 715} 716 717static inline int qdio_outbound_q_moved(struct qdio_q *q) 718{ 719 int bufnr; 720 721 bufnr = get_outbound_buffer_frontier(q); 722 723 if ((bufnr != q->last_move) || q->qdio_error) { 724 q->last_move = bufnr; 725 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); 726 return 1; 727 } else 728 return 0; 729} 730 731static int qdio_kick_outbound_q(struct qdio_q *q) 732{ 733 unsigned int busy_bit; 734 int cc; 735 736 if (!need_siga_out(q)) 737 return 0; 738 739 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 740 qperf_inc(q, siga_write); 741 742 cc = qdio_siga_output(q, &busy_bit); 743 switch (cc) { 744 case 0: 745 break; 746 case 2: 747 if (busy_bit) { 748 DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); 749 cc |= QDIO_ERROR_SIGA_BUSY; 750 } else 751 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); 752 break; 753 case 1: 754 case 3: 755 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 756 break; 757 } 758 return cc; 759} 760 761static void __qdio_outbound_processing(struct qdio_q *q) 762{ 763 qperf_inc(q, tasklet_outbound); 764 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 765 766 if (qdio_outbound_q_moved(q)) 767 qdio_kick_handler(q); 768 769 if (queue_type(q) == QDIO_ZFCP_QFMT) 770 if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) 771 goto sched; 772 773 /* bail out for HiperSockets unicast queues */ 774 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) 775 return; 776 777 if ((queue_type(q) == QDIO_IQDIO_QFMT) && 778 (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) 779 goto sched; 780 781 if (q->u.out.pci_out_enabled) 782 return; 783 784 /* 785 * Now we know that queue type is either qeth without pci enabled 786 * or HiperSockets multicast. Make sure buffer switch from PRIMED to 787 * EMPTY is noticed and outbound_handler is called after some time. 788 */ 789 if (qdio_outbound_q_done(q)) 790 del_timer(&q->u.out.timer); 791 else 792 if (!timer_pending(&q->u.out.timer)) 793 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 794 return; 795 796sched: 797 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 798 return; 799 tasklet_schedule(&q->tasklet); 800} 801 802/* outbound tasklet */ 803void qdio_outbound_processing(unsigned long data) 804{ 805 struct qdio_q *q = (struct qdio_q *)data; 806 __qdio_outbound_processing(q); 807} 808 809void qdio_outbound_timer(unsigned long data) 810{ 811 struct qdio_q *q = (struct qdio_q *)data; 812 813 if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 814 return; 815 tasklet_schedule(&q->tasklet); 816} 817 818static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) 819{ 820 struct qdio_q *out; 821 int i; 822 823 if (!pci_out_supported(q)) 824 return; 825 826 for_each_output_queue(q->irq_ptr, out, i) 827 if (!qdio_outbound_q_done(out)) 828 tasklet_schedule(&out->tasklet); 829} 830 831static void __tiqdio_inbound_processing(struct qdio_q *q) 832{ 833 qperf_inc(q, tasklet_inbound); 834 qdio_sync_after_thinint(q); 835 836 /* 837 * The interrupt could be caused by a PCI request. Check the 838 * PCI capable outbound queues. 839 */ 840 qdio_check_outbound_after_thinint(q); 841 842 if (!qdio_inbound_q_moved(q)) 843 return; 844 845 qdio_kick_handler(q); 846 847 if (!qdio_inbound_q_done(q)) { 848 qperf_inc(q, tasklet_inbound_resched); 849 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { 850 tasklet_schedule(&q->tasklet); 851 return; 852 } 853 } 854 855 qdio_stop_polling(q); 856 /* 857 * We need to check again to not lose initiative after 858 * resetting the ACK state. 859 */ 860 if (!qdio_inbound_q_done(q)) { 861 qperf_inc(q, tasklet_inbound_resched2); 862 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 863 tasklet_schedule(&q->tasklet); 864 } 865} 866 867void tiqdio_inbound_processing(unsigned long data) 868{ 869 struct qdio_q *q = (struct qdio_q *)data; 870 __tiqdio_inbound_processing(q); 871} 872 873static inline void qdio_set_state(struct qdio_irq *irq_ptr, 874 enum qdio_irq_states state) 875{ 876 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); 877 878 irq_ptr->state = state; 879 mb(); 880} 881 882static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) 883{ 884 if (irb->esw.esw0.erw.cons) { 885 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); 886 DBF_ERROR_HEX(irb, 64); 887 DBF_ERROR_HEX(irb->ecw, 64); 888 } 889} 890 891/* PCI interrupt handler */ 892static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) 893{ 894 int i; 895 struct qdio_q *q; 896 897 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 898 return; 899 900 for_each_input_queue(irq_ptr, q, i) 901 tasklet_schedule(&q->tasklet); 902 903 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 904 return; 905 906 for_each_output_queue(irq_ptr, q, i) { 907 if (qdio_outbound_q_done(q)) 908 continue; 909 910 if (!siga_syncs_out_pci(q)) 911 qdio_siga_sync_q(q); 912 913 tasklet_schedule(&q->tasklet); 914 } 915} 916 917static void qdio_handle_activate_check(struct ccw_device *cdev, 918 unsigned long intparm, int cstat, int dstat) 919{ 920 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 921 struct qdio_q *q; 922 923 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); 924 DBF_ERROR("intp :%lx", intparm); 925 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 926 927 if (irq_ptr->nr_input_qs) { 928 q = irq_ptr->input_qs[0]; 929 } else if (irq_ptr->nr_output_qs) { 930 q = irq_ptr->output_qs[0]; 931 } else { 932 dump_stack(); 933 goto no_handler; 934 } 935 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 936 0, -1, -1, irq_ptr->int_parm); 937no_handler: 938 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 939} 940 941static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, 942 int dstat) 943{ 944 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 945 946 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); 947 948 if (cstat) 949 goto error; 950 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) 951 goto error; 952 if (!(dstat & DEV_STAT_DEV_END)) 953 goto error; 954 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 955 return; 956 957error: 958 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 959 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 960 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 961} 962 963/* qdio interrupt handler */ 964void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, 965 struct irb *irb) 966{ 967 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 968 int cstat, dstat; 969 970 if (!intparm || !irq_ptr) { 971 DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); 972 return; 973 } 974 975 if (irq_ptr->perf_stat_enabled) 976 irq_ptr->perf_stat.qdio_int++; 977 978 if (IS_ERR(irb)) { 979 switch (PTR_ERR(irb)) { 980 case -EIO: 981 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); 982 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 983 wake_up(&cdev->private->wait_q); 984 return; 985 default: 986 WARN_ON(1); 987 return; 988 } 989 } 990 qdio_irq_check_sense(irq_ptr, irb); 991 cstat = irb->scsw.cmd.cstat; 992 dstat = irb->scsw.cmd.dstat; 993 994 switch (irq_ptr->state) { 995 case QDIO_IRQ_STATE_INACTIVE: 996 qdio_establish_handle_irq(cdev, cstat, dstat); 997 break; 998 case QDIO_IRQ_STATE_CLEANUP: 999 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1000 break; 1001 case QDIO_IRQ_STATE_ESTABLISHED: 1002 case QDIO_IRQ_STATE_ACTIVE: 1003 if (cstat & SCHN_STAT_PCI) { 1004 qdio_int_handler_pci(irq_ptr); 1005 return; 1006 } 1007 if (cstat || dstat) 1008 qdio_handle_activate_check(cdev, intparm, cstat, 1009 dstat); 1010 break; 1011 case QDIO_IRQ_STATE_STOPPED: 1012 break; 1013 default: 1014 WARN_ON(1); 1015 } 1016 wake_up(&cdev->private->wait_q); 1017} 1018 1019/** 1020 * qdio_get_ssqd_desc - get qdio subchannel description 1021 * @cdev: ccw device to get description for 1022 * @data: where to store the ssqd 1023 * 1024 * Returns 0 or an error code. The results of the chsc are stored in the 1025 * specified structure. 1026 */ 1027int qdio_get_ssqd_desc(struct ccw_device *cdev, 1028 struct qdio_ssqd_desc *data) 1029{ 1030 1031 if (!cdev || !cdev->private) 1032 return -EINVAL; 1033 1034 DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); 1035 return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); 1036} 1037EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1038 1039static void qdio_shutdown_queues(struct ccw_device *cdev) 1040{ 1041 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1042 struct qdio_q *q; 1043 int i; 1044 1045 for_each_input_queue(irq_ptr, q, i) 1046 tasklet_kill(&q->tasklet); 1047 1048 for_each_output_queue(irq_ptr, q, i) { 1049 del_timer(&q->u.out.timer); 1050 tasklet_kill(&q->tasklet); 1051 } 1052} 1053 1054/** 1055 * qdio_shutdown - shut down a qdio subchannel 1056 * @cdev: associated ccw device 1057 * @how: use halt or clear to shutdown 1058 */ 1059int qdio_shutdown(struct ccw_device *cdev, int how) 1060{ 1061 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1062 int rc; 1063 unsigned long flags; 1064 1065 if (!irq_ptr) 1066 return -ENODEV; 1067 1068 BUG_ON(irqs_disabled()); 1069 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); 1070 1071 mutex_lock(&irq_ptr->setup_mutex); 1072 /* 1073 * Subchannel was already shot down. We cannot prevent being called 1074 * twice since cio may trigger a shutdown asynchronously. 1075 */ 1076 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1077 mutex_unlock(&irq_ptr->setup_mutex); 1078 return 0; 1079 } 1080 1081 /* 1082 * Indicate that the device is going down. Scheduling the queue 1083 * tasklets is forbidden from here on. 1084 */ 1085 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1086 1087 tiqdio_remove_input_queues(irq_ptr); 1088 qdio_shutdown_queues(cdev); 1089 qdio_shutdown_debug_entries(irq_ptr, cdev); 1090 1091 /* cleanup subchannel */ 1092 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1093 1094 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) 1095 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); 1096 else 1097 /* default behaviour is halt */ 1098 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); 1099 if (rc) { 1100 DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); 1101 DBF_ERROR("rc:%4d", rc); 1102 goto no_cleanup; 1103 } 1104 1105 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 1106 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1107 wait_event_interruptible_timeout(cdev->private->wait_q, 1108 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 1109 irq_ptr->state == QDIO_IRQ_STATE_ERR, 1110 10 * HZ); 1111 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1112 1113no_cleanup: 1114 qdio_shutdown_thinint(irq_ptr); 1115 1116 /* restore interrupt handler */ 1117 if ((void *)cdev->handler == (void *)qdio_int_handler) 1118 cdev->handler = irq_ptr->orig_handler; 1119 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1120 1121 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1122 mutex_unlock(&irq_ptr->setup_mutex); 1123 if (rc) 1124 return rc; 1125 return 0; 1126} 1127EXPORT_SYMBOL_GPL(qdio_shutdown); 1128 1129/** 1130 * qdio_free - free data structures for a qdio subchannel 1131 * @cdev: associated ccw device 1132 */ 1133int qdio_free(struct ccw_device *cdev) 1134{ 1135 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1136 1137 if (!irq_ptr) 1138 return -ENODEV; 1139 1140 DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); 1141 mutex_lock(&irq_ptr->setup_mutex); 1142 1143 if (irq_ptr->debug_area != NULL) { 1144 debug_unregister(irq_ptr->debug_area); 1145 irq_ptr->debug_area = NULL; 1146 } 1147 cdev->private->qdio_data = NULL; 1148 mutex_unlock(&irq_ptr->setup_mutex); 1149 1150 qdio_release_memory(irq_ptr); 1151 return 0; 1152} 1153EXPORT_SYMBOL_GPL(qdio_free); 1154 1155/** 1156 * qdio_allocate - allocate qdio queues and associated data 1157 * @init_data: initialization data 1158 */ 1159int qdio_allocate(struct qdio_initialize *init_data) 1160{ 1161 struct qdio_irq *irq_ptr; 1162 1163 DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); 1164 1165 if ((init_data->no_input_qs && !init_data->input_handler) || 1166 (init_data->no_output_qs && !init_data->output_handler)) 1167 return -EINVAL; 1168 1169 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || 1170 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) 1171 return -EINVAL; 1172 1173 if ((!init_data->input_sbal_addr_array) || 1174 (!init_data->output_sbal_addr_array)) 1175 return -EINVAL; 1176 1177 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ 1178 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1179 if (!irq_ptr) 1180 goto out_err; 1181 1182 mutex_init(&irq_ptr->setup_mutex); 1183 qdio_allocate_dbf(init_data, irq_ptr); 1184 1185 /* 1186 * Allocate a page for the chsc calls in qdio_establish. 1187 * Must be pre-allocated since a zfcp recovery will call 1188 * qdio_establish. In case of low memory and swap on a zfcp disk 1189 * we may not be able to allocate memory otherwise. 1190 */ 1191 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); 1192 if (!irq_ptr->chsc_page) 1193 goto out_rel; 1194 1195 /* qdr is used in ccw1.cda which is u32 */ 1196 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1197 if (!irq_ptr->qdr) 1198 goto out_rel; 1199 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); 1200 1201 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, 1202 init_data->no_output_qs)) 1203 goto out_rel; 1204 1205 init_data->cdev->private->qdio_data = irq_ptr; 1206 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1207 return 0; 1208out_rel: 1209 qdio_release_memory(irq_ptr); 1210out_err: 1211 return -ENOMEM; 1212} 1213EXPORT_SYMBOL_GPL(qdio_allocate); 1214 1215/** 1216 * qdio_establish - establish queues on a qdio subchannel 1217 * @init_data: initialization data 1218 */ 1219int qdio_establish(struct qdio_initialize *init_data) 1220{ 1221 struct qdio_irq *irq_ptr; 1222 struct ccw_device *cdev = init_data->cdev; 1223 unsigned long saveflags; 1224 int rc; 1225 1226 DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); 1227 1228 irq_ptr = cdev->private->qdio_data; 1229 if (!irq_ptr) 1230 return -ENODEV; 1231 1232 if (cdev->private->state != DEV_STATE_ONLINE) 1233 return -EINVAL; 1234 1235 mutex_lock(&irq_ptr->setup_mutex); 1236 qdio_setup_irq(init_data); 1237 1238 rc = qdio_establish_thinint(irq_ptr); 1239 if (rc) { 1240 mutex_unlock(&irq_ptr->setup_mutex); 1241 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1242 return rc; 1243 } 1244 1245 /* establish q */ 1246 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; 1247 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1248 irq_ptr->ccw.count = irq_ptr->equeue.count; 1249 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); 1250 1251 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1252 ccw_device_set_options_mask(cdev, 0); 1253 1254 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); 1255 if (rc) { 1256 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); 1257 DBF_ERROR("rc:%4x", rc); 1258 } 1259 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); 1260 1261 if (rc) { 1262 mutex_unlock(&irq_ptr->setup_mutex); 1263 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1264 return rc; 1265 } 1266 1267 wait_event_interruptible_timeout(cdev->private->wait_q, 1268 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 1269 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); 1270 1271 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { 1272 mutex_unlock(&irq_ptr->setup_mutex); 1273 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1274 return -EIO; 1275 } 1276 1277 qdio_setup_ssqd_info(irq_ptr); 1278 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); 1279 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); 1280 1281 /* qebsm is now setup if available, initialize buffer states */ 1282 qdio_init_buf_states(irq_ptr); 1283 1284 mutex_unlock(&irq_ptr->setup_mutex); 1285 qdio_print_subchannel_info(irq_ptr, cdev); 1286 qdio_setup_debug_entries(irq_ptr, cdev); 1287 return 0; 1288} 1289EXPORT_SYMBOL_GPL(qdio_establish); 1290 1291/** 1292 * qdio_activate - activate queues on a qdio subchannel 1293 * @cdev: associated cdev 1294 */ 1295int qdio_activate(struct ccw_device *cdev) 1296{ 1297 struct qdio_irq *irq_ptr; 1298 int rc; 1299 unsigned long saveflags; 1300 1301 DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); 1302 1303 irq_ptr = cdev->private->qdio_data; 1304 if (!irq_ptr) 1305 return -ENODEV; 1306 1307 if (cdev->private->state != DEV_STATE_ONLINE) 1308 return -EINVAL; 1309 1310 mutex_lock(&irq_ptr->setup_mutex); 1311 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1312 rc = -EBUSY; 1313 goto out; 1314 } 1315 1316 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; 1317 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1318 irq_ptr->ccw.count = irq_ptr->aqueue.count; 1319 irq_ptr->ccw.cda = 0; 1320 1321 spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); 1322 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 1323 1324 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 1325 0, DOIO_DENY_PREFETCH); 1326 if (rc) { 1327 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); 1328 DBF_ERROR("rc:%4x", rc); 1329 } 1330 spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); 1331 1332 if (rc) 1333 goto out; 1334 1335 if (is_thinint_irq(irq_ptr)) 1336 tiqdio_add_input_queues(irq_ptr); 1337 1338 /* wait for subchannel to become active */ 1339 msleep(5); 1340 1341 switch (irq_ptr->state) { 1342 case QDIO_IRQ_STATE_STOPPED: 1343 case QDIO_IRQ_STATE_ERR: 1344 rc = -EIO; 1345 break; 1346 default: 1347 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1348 rc = 0; 1349 } 1350out: 1351 mutex_unlock(&irq_ptr->setup_mutex); 1352 return rc; 1353} 1354EXPORT_SYMBOL_GPL(qdio_activate); 1355 1356static inline int buf_in_between(int bufnr, int start, int count) 1357{ 1358 int end = add_buf(start, count); 1359 1360 if (end > start) { 1361 if (bufnr >= start && bufnr < end) 1362 return 1; 1363 else 1364 return 0; 1365 } 1366 1367 /* wrap-around case */ 1368 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || 1369 (bufnr < end)) 1370 return 1; 1371 else 1372 return 0; 1373} 1374 1375/** 1376 * handle_inbound - reset processed input buffers 1377 * @q: queue containing the buffers 1378 * @callflags: flags 1379 * @bufnr: first buffer to process 1380 * @count: how many buffers are emptied 1381 */ 1382static int handle_inbound(struct qdio_q *q, unsigned int callflags, 1383 int bufnr, int count) 1384{ 1385 int used, diff; 1386 1387 qperf_inc(q, inbound_call); 1388 1389 if (!q->u.in.polling) 1390 goto set; 1391 1392 /* protect against stop polling setting an ACK for an emptied slsb */ 1393 if (count == QDIO_MAX_BUFFERS_PER_Q) { 1394 /* overwriting everything, just delete polling status */ 1395 q->u.in.polling = 0; 1396 q->u.in.ack_count = 0; 1397 goto set; 1398 } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { 1399 if (is_qebsm(q)) { 1400 /* partial overwrite, just update ack_start */ 1401 diff = add_buf(bufnr, count); 1402 diff = sub_buf(diff, q->u.in.ack_start); 1403 q->u.in.ack_count -= diff; 1404 if (q->u.in.ack_count <= 0) { 1405 q->u.in.polling = 0; 1406 q->u.in.ack_count = 0; 1407 goto set; 1408 } 1409 q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); 1410 } 1411 else 1412 /* the only ACK will be deleted, so stop polling */ 1413 q->u.in.polling = 0; 1414 } 1415 1416set: 1417 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1418 1419 used = atomic_add_return(count, &q->nr_buf_used) - count; 1420 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); 1421 1422 /* no need to signal as long as the adapter had free buffers */ 1423 if (used) 1424 return 0; 1425 1426 if (need_siga_in(q)) 1427 return qdio_siga_input(q); 1428 return 0; 1429} 1430 1431/** 1432 * handle_outbound - process filled outbound buffers 1433 * @q: queue containing the buffers 1434 * @callflags: flags 1435 * @bufnr: first buffer to process 1436 * @count: how many buffers are filled 1437 */ 1438static int handle_outbound(struct qdio_q *q, unsigned int callflags, 1439 int bufnr, int count) 1440{ 1441 unsigned char state; 1442 int used, rc = 0; 1443 1444 qperf_inc(q, outbound_call); 1445 1446 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1447 used = atomic_add_return(count, &q->nr_buf_used); 1448 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1449 1450 if (callflags & QDIO_FLAG_PCI_OUT) { 1451 q->u.out.pci_out_enabled = 1; 1452 qperf_inc(q, pci_request_int); 1453 } 1454 else 1455 q->u.out.pci_out_enabled = 0; 1456 1457 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1458 if (multicast_outbound(q)) 1459 rc = qdio_kick_outbound_q(q); 1460 else 1461 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1462 (count > 1) && 1463 (count <= q->irq_ptr->ssqd_desc.mmwc)) { 1464 /* exploit enhanced SIGA */ 1465 q->u.out.use_enh_siga = 1; 1466 rc = qdio_kick_outbound_q(q); 1467 } else { 1468 /* 1469 * One siga-w per buffer required for unicast 1470 * HiperSockets. 1471 */ 1472 q->u.out.use_enh_siga = 0; 1473 while (count--) { 1474 rc = qdio_kick_outbound_q(q); 1475 if (rc) 1476 goto out; 1477 } 1478 } 1479 goto out; 1480 } 1481 1482 if (need_siga_sync(q)) { 1483 qdio_siga_sync_q(q); 1484 goto out; 1485 } 1486 1487 /* try to fast requeue buffers */ 1488 get_buf_state(q, prev_buf(bufnr), &state, 0); 1489 if (state != SLSB_CU_OUTPUT_PRIMED) 1490 rc = qdio_kick_outbound_q(q); 1491 else 1492 qperf_inc(q, fast_requeue); 1493 1494out: 1495 tasklet_schedule(&q->tasklet); 1496 return rc; 1497} 1498 1499/** 1500 * do_QDIO - process input or output buffers 1501 * @cdev: associated ccw_device for the qdio subchannel 1502 * @callflags: input or output and special flags from the program 1503 * @q_nr: queue number 1504 * @bufnr: buffer number 1505 * @count: how many buffers to process 1506 */ 1507int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 1508 int q_nr, unsigned int bufnr, unsigned int count) 1509{ 1510 struct qdio_irq *irq_ptr; 1511 1512 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) 1513 return -EINVAL; 1514 1515 irq_ptr = cdev->private->qdio_data; 1516 if (!irq_ptr) 1517 return -ENODEV; 1518 1519 DBF_DEV_EVENT(DBF_INFO, irq_ptr, 1520 "do%02x b:%02x c:%02x", callflags, bufnr, count); 1521 1522 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1523 return -EBUSY; 1524 1525 if (callflags & QDIO_FLAG_SYNC_INPUT) 1526 return handle_inbound(irq_ptr->input_qs[q_nr], 1527 callflags, bufnr, count); 1528 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1529 return handle_outbound(irq_ptr->output_qs[q_nr], 1530 callflags, bufnr, count); 1531 return -EINVAL; 1532} 1533EXPORT_SYMBOL_GPL(do_QDIO); 1534 1535static int __init init_QDIO(void) 1536{ 1537 int rc; 1538 1539 rc = qdio_setup_init(); 1540 if (rc) 1541 return rc; 1542 rc = tiqdio_allocate_memory(); 1543 if (rc) 1544 goto out_cache; 1545 rc = qdio_debug_init(); 1546 if (rc) 1547 goto out_ti; 1548 rc = tiqdio_register_thinints(); 1549 if (rc) 1550 goto out_debug; 1551 return 0; 1552 1553out_debug: 1554 qdio_debug_exit(); 1555out_ti: 1556 tiqdio_free_memory(); 1557out_cache: 1558 qdio_setup_exit(); 1559 return rc; 1560} 1561 1562static void __exit exit_QDIO(void) 1563{ 1564 tiqdio_unregister_thinints(); 1565 tiqdio_free_memory(); 1566 qdio_debug_exit(); 1567 qdio_setup_exit(); 1568} 1569 1570module_init(init_QDIO); 1571module_exit(exit_QDIO); 1572