/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  QP functions
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

static struct kmem_cache *qp_cache;

/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)

/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};
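
/*
 * Note: the EHCA_QPS_* values are the encoding used in the
 * hcp_modify_qp_control_block (mqpcb->qp_state); they do not match the
 * enum ib_qp_state numbering, hence the translation helpers below.
 */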

/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};

/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;
	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_SQE:
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}
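
/*
 * Example: get_modqp_statetrans(IB_QPS_RESET, IB_QPS_INIT) yields
 * IB_QPST_RESET2INIT; transitions not listed above (e.g. RESET to RTS)
 * fall through and return -EINVAL.
 */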

/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	case IB_QPT_RAW_ETHERTYPE:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}

/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_h_register_rpage_qp() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_h_register_rpage_qp() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}

static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}

static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
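
/*
 * The page_size chosen above is the value init_qp_queue() passes to
 * ipz_queue_ctor(): small queues live in a single page of
 * 128 << page_size bytes (so 2 selects 512 and 3 selects 1024 bytes),
 * while 0 means "not small" and a regular EHCA_PAGESIZE queue is used.
 */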

/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);
}

static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}

static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;
		qmap->map[i].cqe_req = 0;
	}
}
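
/*
 * reset_queue_map() leaves tail pointing at the last slot so that the
 * first next_index(tail) lands on slot 0, and marks every entry as
 * already reported so that no bogus flush CQEs are generated for WQEs
 * that were never posted.
 */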

/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0, is_user = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;
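
	/*
	 * Bit 7 of the caller's qp_type marks an eHCA low-latency QP and
	 * the LLQP_COMP_MASK bits carry its completion flags; only the
	 * low five bits, i.e. the plain ib_qp_type, are kept from here on.
	 */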

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ pd=%p max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}
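
	/*
	 * The 250-SGE limit for UD/SMI/GSI (vs. 252 otherwise) presumably
	 * leaves room for the two extra SGEs that the UD_AV circumvention
	 * below adds to each queue behind the caller's back.
	 */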

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	if (pd->uobject && udata) {
		is_user = 1;
		context = pd->uobject->context;
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}
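
	/*
	 * The adjustments above undo the circumvention resources that were
	 * requested earlier: the extra RC send WQE and the two extra UD
	 * SGEs are hidden again, so act_nr_* reflect only what the caller
	 * may actually use.
	 */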

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		if (!is_user) {
			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
				my_qp->ipz_squeue.qe_size;
			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->sq_map.map) {
				ehca_err(pd->device, "Couldn't allocate "
					 "squeue map");
				ret = -ENOMEM;
				goto create_qp_exit3;
			}
			INIT_LIST_HEAD(&my_qp->sq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->sq_map);
		}
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}
		if (!is_user) {
			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
				my_qp->ipz_rqueue.qe_size;
			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->rq_map.map) {
				ehca_err(pd->device, "Couldn't allocate "
					 "rqueue map");
				ret = -ENOMEM;
				goto create_qp_exit5;
			}
			INIT_LIST_HEAD(&my_qp->rq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->rq_map);
		}
	} else if (init_attr->srq && !is_user) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				ret = -ENOMEM;
				goto create_qp_exit6;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp) && !is_user)
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp) && !is_user)
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}

struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}

static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);

struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}
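
/*
 * An eHCA SRQ is thus an RC QP that has only a receive queue; the
 * INIT / enable / RTR sequence above appears to mirror the state
 * changes internal_modify_qp() performs for regular QPs.
 */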

/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
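
/*
 * The 0xff sentinel that terminates the purge loop above is the marker
 * internal_modify_qp() writes into optype/wqef of the next free WQE
 * before calling prepare_sqe_rts().  bad_wqe_cnt therefore counts the
 * failed WQE plus every WQE behind it; since the failed WQE itself is
 * reprocessed and ignored by poll_cq(), only bad_wqe_cnt - 1 flush
 * error completions will be seen.
 */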

static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;
	unsigned int tail_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;

	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	return 0;
}

static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				&my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				&my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					flags);
		}
	}

	return 0;
}
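
/*
 * left_to_poll counts the completions still outstanding for WQEs the
 * hardware processed before the QP entered the error state.  Only when
 * both counters are already zero may the QP be put on the CQs' error
 * lists right away; otherwise the CQ poll path does so after the
 * outstanding completions have been reaped.  Either way, flush CQEs
 * are generated only for WQEs that are still pending.
 */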

/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int is_user = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	if (ibqp->uobject)
		is_user = 1;

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = qp_attr_idx;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable connection
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}

	/* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->alt_pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
	    && !is_user) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR && !is_user) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		if (!is_user)
			reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp) && !is_user)
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, i.e. we got a PORT_ACTIVE event, we'll replay the
	 * cached modify call sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    be connected and the module option nr_ports=1 be given by the
	 *    user, which is very inconvenient for the end user.
	 * 2) Firmware accepts modify_qp() only if the respective port has
	 *    become active. Older code had a wait loop of 30sec in
	 *    create_qp()/define_aqp1(), which is not appropriate in
	 *    practice. This code now removes that wait loop, see
	 *    define_aqp1(), and always reports all ports to ib_mad and thus
	 *    to users. Only activated ports will then be usable.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}

void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
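
/*
 * ehca_recover_sqp() runs when the PORT_ACTIVE event arrives: the
 * attribute sets cached above are replayed in the order ib_mad issued
 * them, and the receive-queue doorbell is rung for any receive WQEs
 * posted while the port was still down.
 */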
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr - 1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr - 1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index = qpcb->prim_p_key_idx;
	qp_attr->port_num = qpcb->prim_phys_port;
	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag)
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al)
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
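
/*
 * Hedged usage sketch (not driver code): a consumer reads the state back
 * through the core verbs wrapper, which dispatches to ehca_query_qp()
 * above.  Note that IB_QP_ACCESS_FLAGS and IB_QP_EN_SQD_ASYNC_NOTIFY are
 * rejected here (QP_ATTR_QUERY_NOT_SUPPORTED):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_CAP, &init_attr))
 *		pr_info("qp_num=%x state=%x max_send_wr=%x\n",
 *			qp->qp_num, attr.qp_state, attr.cap.max_send_wr);
 */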
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit = attr->srq_limit;
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = qpcb->curr_srq_limit;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
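
/*
 * Hedged usage sketch: a consumer arms the SRQ limit via the core verbs
 * wrapper, which lands in ehca_modify_srq() above; once fewer than
 * srq_limit WQEs remain posted, the hardware raises an asynchronous
 * IB_EVENT_SRQ_LIMIT_REACHED event (the limit value is a placeholder):
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */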
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	int is_user = 0;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		is_user = 1;
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp) && !is_user)
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;

		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
		if (!is_user)
			vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
		if (!is_user)
			vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}
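
/*
 * Hedged teardown sketch (consumer side, not driver code): before the
 * destroy verbs below are called, a well-behaved consumer typically moves
 * the QP to the error state and drains its CQ so no completions are lost:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ib_modify_qp(qp, &attr, IB_QP_STATE);
 *	// ... poll the CQ until all flushed completions are reaped ...
 *	ib_destroy_qp(qp);
 */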
2174 */ 2175 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user) 2176 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); 2177 2178 if (HAS_SQ(my_qp) && !is_user) 2179 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node); 2180 2181 /* now wait until all pending events have completed */ 2182 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events)); 2183 2184 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 2185 if (h_ret != H_SUCCESS) { 2186 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli " 2187 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); 2188 return ehca2ib_return_code(h_ret); 2189 } 2190 2191 port_num = my_qp->init_attr.port_num; 2192 qp_type = my_qp->init_attr.qp_type; 2193 2194 if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) { 2195 spin_lock_irqsave(&sport->mod_sqp_lock, flags); 2196 kfree(my_qp->mod_qp_parm); 2197 my_qp->mod_qp_parm = NULL; 2198 shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL; 2199 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); 2200 } 2201 2202 /* no support for IB_QPT_SMI yet */ 2203 if (qp_type == IB_QPT_GSI) { 2204 struct ib_event event; 2205 ehca_info(dev, "device %s: port %x is inactive.", 2206 shca->ib_device.name, port_num); 2207 event.device = &shca->ib_device; 2208 event.event = IB_EVENT_PORT_ERR; 2209 event.element.port_num = port_num; 2210 shca->sport[port_num - 1].port_state = IB_PORT_DOWN; 2211 ib_dispatch_event(&event); 2212 } 2213 2214 if (HAS_RQ(my_qp)) { 2215 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 2216 if (!is_user) 2217 vfree(my_qp->rq_map.map); 2218 } 2219 if (HAS_SQ(my_qp)) { 2220 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 2221 if (!is_user) 2222 vfree(my_qp->sq_map.map); 2223 } 2224 kmem_cache_free(qp_cache, my_qp); 2225 atomic_dec(&shca->num_qps); 2226 return 0; 2227} 2228 2229int ehca_destroy_qp(struct ib_qp *qp) 2230{ 2231 return internal_destroy_qp(qp->device, 2232 container_of(qp, struct ehca_qp, ib_qp), 2233 qp->uobject); 2234} 2235 2236int ehca_destroy_srq(struct ib_srq *srq) 2237{ 2238 return internal_destroy_qp(srq->device, 2239 container_of(srq, struct ehca_qp, ib_srq), 2240 srq->uobject); 2241} 2242 2243int ehca_init_qp_cache(void) 2244{ 2245 qp_cache = kmem_cache_create("ehca_cache_qp", 2246 sizeof(struct ehca_qp), 0, 2247 SLAB_HWCACHE_ALIGN, 2248 NULL); 2249 if (!qp_cache) 2250 return -ENOMEM; 2251 return 0; 2252} 2253 2254void ehca_cleanup_qp_cache(void) 2255{ 2256 if (qp_cache) 2257 kmem_cache_destroy(qp_cache); 2258} 2259