/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"

static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

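/*
 * Each user context keeps a list of iwch_mm_entry records mapping a
 * per-context pseudo file offset ("key") to the physical address and
 * length of a queue or doorbell region.  create_cq/create_qp hand the
 * key back to userspace, which passes it as the mmap() offset so that
 * iwch_mmap() can look the region up again.
 */
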
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

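/*
 * A sketch of how the userspace side is expected to consume uresp.key
 * (assumed; the consumer would be a libibverbs provider such as
 * libcxgb3, which is not part of this file):
 *
 *	cq_mem = mmap(NULL,
 *		      PAGE_ALIGN((1UL << uresp.size_log2) *
 *				 sizeof(struct t3_cqe)),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED,
 *		      ctx->cmd_fd, uresp.key);
 *
 * The key is a per-context pseudo offset rather than a real file
 * offset; iwch_mmap() translates it back to the queue's physical pages.
 */
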
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

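/*
 * iwch_mmap() below resolves a key handed out by create_cq/create_qp.
 * Addresses inside the adapter's user doorbell BAR are mapped uncached
 * and write-only (VM_MAYREAD is cleared) via io_remap_pfn_range();
 * anything else is assumed to be physically contiguous queue memory
 * and is mapped with remap_pfn_range().
 */
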
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

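/*
 * MR bookkeeping for the registration paths below: the low 8 bits of a
 * T3 STag are a consumer key, so the kernel lookup table is indexed by
 * mmid = stag >> 8.  Each path allocates a PBL (physical buffer list)
 * in adapter memory, writes the page DMA addresses into it, and then
 * programs the TPT entry via iwch_register_mem().
 */
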
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

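/*
 * TPT page-size encoding used by all registration paths in this file:
 * "shift" is log2 of the region page size and the TPT field stores
 * shift - 12, so a 4KB page is encoded as 0 and, for example, a 64KB
 * page (shift == 16) as 4.
 */
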
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

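/*
 * Memory windows come from the same STag space as MRs:
 * cxio_allocate_window() programs a TPT entry of type TPT_MW, and the
 * returned stag doubles as the initial rkey.  Binding a window to a
 * parent MR is a send-queue operation, handled by iwch_bind_mw (wired
 * up in iwch_register_device() below).
 */
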
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}

static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

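/*
 * Queue sizing in iwch_create_qp() below, worked through for a request
 * of max_recv_wr = max_send_wr = 100: rqsize becomes
 * roundup_pow_of_two(100) = 128 (had the request already been a power
 * of two, the +1 retry would bump it to the next one, keeping one slot
 * reserved, which is why max_recv_wr is reported back as rqsize - 1);
 * sqsize becomes 128 and wqsize = roundup_pow_of_two(128 + 128) = 256.
 * A kernel QP then grows to
 * wqsize = roundup_pow_of_two(128 + roundup_pow_of_two(200)) = 512
 * so that fastreg WRs, which can occupy two WR slots, still fit.
 */
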
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

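/*
 * The iw_cm core takes and drops QP references through the add_ref/
 * rem_ref hooks below while a connection is using the QP;
 * iwch_destroy_qp() waits on qhp->wait until the count reaches zero,
 * so the final rem_ref must wake the waiter.
 */
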
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{

	struct iwch_dev *dev;
	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

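/*
 * Port state in iwch_query_port() below is derived from the underlying
 * net_device: no carrier maps to IB_PORT_DOWN, carrier with at least
 * one IPv4 address (a non-empty ifa_list) to IB_PORT_ACTIVE, and
 * carrier without an address to IB_PORT_INIT, since an iWARP endpoint
 * is only reachable once the interface has an IP address.
 */
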
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				 m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}

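/*
 * The four read-only attributes below appear under
 * /sys/class/infiniband/cxgb3_<n>/ once the device is registered:
 * hw_rev and board_id come from adapter/PCI identifiers, while fw_ver
 * and hca_type are pulled from the net driver via ethtool.
 */
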
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

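/*
 * iwch_register_device() fills in the ib_device ops table, the iWARP
 * CM ops and the uverbs command mask, then registers with the IB core
 * and creates the sysfs files.  Note the unwind order on failure:
 * bail2 unregisters the device before bail1 frees the iwcm table.
 */
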
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}