/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"

static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __FUNCTION__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

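/*
 * User-mapped objects (CQ/WQ memory, doorbells) are handed to userspace
 * as page-aligned "keys": each allocation takes the context's current
 * key as an mmap cookie and bumps it by PAGE_SIZE.  iwch_mmap() later
 * matches the offset against the iwch_mm_entry list kept on this
 * context.  A rough sketch of the userspace side (illustrative names,
 * not from this file):
 *
 *	queue = mmap(NULL, resp.size, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, ctx_fd, resp.key);
 */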
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE failures,
		 *	incoming RDMA READ REQUEST failures.
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof(struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

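/*
 * CQ resize is compiled out ("notyet").  The intended sequence below:
 * quiesce every QP on the CQ, create a larger t3_cq, copy the CQEs
 * over, update the HW context, destroy the old queue, and resume the
 * QPs.  Until that path is enabled the verb returns -ENOSYS.
 */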
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__FUNCTION__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

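/*
 * mmap() dispatch for user verbs: the pgoff-derived key selects a
 * previously registered iwch_mm_entry.  Addresses falling inside the
 * RNIC's user doorbell window are mapped uncached and must not be
 * readable; anything else is WQ/CQ contiguous DMA memory.
 */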
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE - 1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	rhp = to_iwch_dev(ibdev);
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
	kfree(page_list);
	if (ret) {
		goto err;
	}
	return &mhp->ibmr;
err:
	kfree(mhp);
	return ERR_PTR(ret);
}

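/*
 * Re-registration is staged through an on-stack copy of the MR ("mh")
 * so a failure from iwch_reregister_mem() leaves the original
 * registration intact; only on success are the requested PD, access,
 * and translation changes written back into *mhp.
 */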
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 *iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

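/*
 * Pins the user buffer with ib_umem_get() and flattens its DMA
 * scatterlist into a big-endian page list for the adapter's PBL.
 * On T3B devices the PBL address (relative to pbl_base, in 8-byte
 * units) is also returned to the userspace library via udata.
 */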
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
			}
		}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;
	mhp->attr.pbl_size = i;
	err = iwch_register_mem(rhp, php, mhp, shift, pages);
	kfree(pages);
	if (err)
		goto err;

	if (udata && t3b_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
	return 0;
}

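/*
 * QP teardown: force the QP into ERROR so outstanding work flushes,
 * wait for the connection endpoint to go away, then drop the idr
 * handle and the last reference before destroying the hardware WQ.
 */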
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

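/*
 * Work queue sizing: the hardware RQT must hold (entries + 1) rounded
 * up to a power of two, with a floor of 16; the SQ and combined WQ are
 * also rounded up because Q_FREECNT() and friends assume power-of-two
 * rings.  E.g. max_recv_wr = 16 yields an RQT of 32, and the verb
 * reports max_recv_wr = 31 back to the caller.
 */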
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr + 1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}
	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

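/*
 * iWARP has no RTR state: an IB_QPS_RTR transition is quietly dropped
 * from the mask here, and (per the iw_cm model) connection setup is
 * driven through the CM verbs registered in iwch_register_device().
 */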
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ?
				1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

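/*
 * There is no subnet manager on an iWARP fabric, so device/port
 * queries report adapter limits plus fixed values: the GID is built
 * from the port's MAC address, the pkey table holds a single zero
 * entry, and the port is always ACTIVE with a 4096-byte max MTU.
 */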
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = ~0ull;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
		       dev->rdev.rnic_info.pdev->device);
}

static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *iwch_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};

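/*
 * Hook everything up: fill in the ib_device verb table and the iWARP
 * CM ops, register with the RDMA core, then create the sysfs files
 * declared above.  Failures unwind in reverse order.
 */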
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags =
	    (IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ibdev.class_dev,
					       iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		class_device_remove_file(&dev->ibdev.class_dev,
					 iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	return;
}