Lines matching defs:mhp in /freebsd-13-stable/sys/dev/cxgbe/iw_cxgbe/

362 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
366 mhp->attr.state = 1;
367 mhp->attr.stag = stag;
369 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
370 CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
371 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
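
The matches at lines 362-371 cover almost all of finish_mem_reg(). A minimal sketch of the likely full function, assuming the mmid is derived from the stag as stag >> 8 (the dereg path at line 714 uses the same derivation):

    static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
    {
        u32 mmid;

        mhp->attr.state = 1;                /* MR is now valid for use */
        mhp->attr.stag = stag;
        mmid = stag >> 8;                   /* assumed: mmid is the stag index */
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
        /* publish the MR in the device's mmid table so completions can find it */
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
    }
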
375 struct c4iw_mr *mhp, int shift)
380 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
381 FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
382 mhp->attr.mw_bind_enable, mhp->attr.zbva,
383 mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
384 mhp->attr.pbl_size, mhp->attr.pbl_addr);
388 ret = finish_mem_reg(mhp, stag);
390 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
391 mhp->attr.pbl_addr);
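
A sketch of register_mem() assembled from the matches at lines 375-391; the control flow between the two calls is an assumption (write the TPT entry, then publish the MR, and tear the stag back down if publishing fails):

    static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
        struct c4iw_mr *mhp, int shift)
    {
        u32 stag;
        int ret;

        /* program a non-shared MR (NSMR) TPT entry; a zero-length MR
         * gets no permissions and an all-ones length */
        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
            FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
            mhp->attr.mw_bind_enable, mhp->attr.zbva,
            mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
            mhp->attr.pbl_size, mhp->attr.pbl_addr);
        if (ret)
            return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret)    /* assumed unwind: free the stag if the handle insert fails */
            dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                mhp->attr.pbl_addr);
        return ret;
    }

Since finish_mem_reg() is the last step, the failure path only has to undo the TPT write.
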
395 static int alloc_pbl(struct c4iw_mr *mhp, int npages)
397 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
400 if (!mhp->attr.pbl_addr)
403 mhp->attr.pbl_size = npages;
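
alloc_pbl() is nearly complete in the matches at lines 395-403. A sketch assuming the allocation is npages entries of 8 bytes each, the same << 3 scaling the free paths at lines 547 and 720 use:

    static int alloc_pbl(struct c4iw_mr *mhp, int npages)
    {
        /* each PBL entry is a 64-bit DMA address, hence npages << 3 bytes */
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
            npages << 3);
        if (!mhp->attr.pbl_addr)
            return -ENOMEM;
        mhp->attr.pbl_size = npages;
        return 0;
    }
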
412 struct c4iw_mr *mhp;
420 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
421 if (!mhp)
424 mhp->rhp = rhp;
425 mhp->attr.pdid = php->pdid;
426 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
427 mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
428 mhp->attr.zbva = 0;
429 mhp->attr.va_fbo = 0;
430 mhp->attr.page_size = 0;
431 mhp->attr.len = ~0ULL;
432 mhp->attr.pbl_size = 0;
435 FW_RI_STAG_NSMR, mhp->attr.perms,
436 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
440 ret = finish_mem_reg(mhp, stag);
443 return &mhp->ibmr;
445 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
446 mhp->attr.pbl_addr);
448 kfree(mhp);
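
The matches from lines 412-448 belong to the DMA MR path: zbva and va_fbo of 0 with a length of ~0ULL describe an MR spanning the whole address space, so no PBL is needed (pbl_size = 0). A sketch of the likely surrounding function, conventionally named c4iw_get_dma_mr in this driver family; the exact signature, the T4_STAG_UNSET initializer, and the goto labels are assumptions:

    struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
    {
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct c4iw_dev *rhp = php->rhp;
        struct c4iw_mr *mhp;
        u32 stag = T4_STAG_UNSET;   /* assumed initializer */
        int ret;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
            return ERR_PTR(-ENOMEM);

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
        mhp->attr.zbva = 0;         /* not zero-based: covers all of memory */
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.len = ~0ULL;      /* maximal length for a DMA MR */
        mhp->attr.pbl_size = 0;     /* no page list needed */

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
            FW_RI_STAG_NSMR, mhp->attr.perms,
            mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
        if (ret)
            goto err_free;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
            goto err_dereg;
        return &mhp->ibmr;

    err_dereg:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
    err_free:
        kfree(mhp);
        return ERR_PTR(ret);
    }
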
462 struct c4iw_mr *mhp;
478 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
479 if (!mhp)
482 mhp->rhp = rhp;
484 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
485 if (IS_ERR(mhp->umem)) {
486 err = PTR_ERR(mhp->umem);
487 kfree(mhp);
491 shift = ffs(mhp->umem->page_size) - 1;
493 n = mhp->umem->nmap;
494 err = alloc_pbl(mhp, n);
505 for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
509 mhp->umem->page_size * k);
511 err = write_pbl(&mhp->rhp->rdev,
513 mhp->attr.pbl_addr + (n << 3), i);
524 err = write_pbl(&mhp->rhp->rdev, pages,
525 mhp->attr.pbl_addr + (n << 3), i);
532 mhp->attr.pdid = php->pdid;
533 mhp->attr.zbva = 0;
534 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
535 mhp->attr.va_fbo = virt;
536 mhp->attr.page_size = shift - 12;
537 mhp->attr.len = length;
539 err = register_mem(rhp, php, mhp, shift);
543 return &mhp->ibmr;
546 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
547 mhp->attr.pbl_size << 3);
550 ib_umem_release(mhp->umem);
551 kfree(mhp);
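
Lines 462-551 are the user MR registration path: ib_umem_get() pins the user buffer, alloc_pbl() sizes the PBL from the mapped scatterlist entry count (nmap), and the per-page DMA addresses are copied to adapter memory in batches. A sketch of just the fill loop, assuming a page-sized __be64 staging buffer named pages and locals (sg, entry, i, n, k, len, err) declared earlier; the (n << 3) offset in the matches scales the running entry count to bytes:

    i = n = 0;
    for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
        len = sg_dma_len(sg) >> shift;
        for (k = 0; k < len; ++k) {
            pages[i++] = cpu_to_be64(sg_dma_address(sg) +
                mhp->umem->page_size * k);
            if (i == PAGE_SIZE / sizeof(*pages)) {
                /* staging buffer full: flush one batch into the
                 * PBL at the next free entry */
                err = write_pbl(&mhp->rhp->rdev, pages,
                    mhp->attr.pbl_addr + (n << 3), i);
                if (err)
                    goto pbl_done;  /* assumed label */
                n += i;
                i = 0;
            }
        }
    }
    if (i)  /* flush the final partial batch (the call at line 524) */
        err = write_pbl(&mhp->rhp->rdev, pages,
            mhp->attr.pbl_addr + (n << 3), i);

On any failure after alloc_pbl(), the matches at lines 546-551 free the PBL, release the umem, and free the MR struct, in that order.
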
560 struct c4iw_mw *mhp;
570 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
571 if (!mhp)
575 kfree(mhp);
578 mhp->rhp = rhp;
579 mhp->attr.pdid = php->pdid;
580 mhp->attr.type = FW_RI_STAG_MW;
581 mhp->attr.stag = stag;
583 mhp->ibmw.rkey = stag;
584 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
585 deallocate_window(&rhp->rdev, mhp->attr.stag);
586 kfree(mhp);
589 CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
591 return &(mhp->ibmw);
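
Lines 560-591 allocate a memory window (MW): reserve a stag window on the adapter, record it in the mmid table, and unwind both on failure. A sketch assuming an allocate_window() helper paired with the deallocate_window() visible at lines 585 and 604, and an assumed verb signature:

    struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
        struct ib_udata *udata)     /* assumed signature */
    {
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct c4iw_dev *rhp = php->rhp;
        struct c4iw_mw *mhp;
        u32 mmid, stag = 0;
        int ret;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
            return ERR_PTR(-ENOMEM);
        ret = allocate_window(&rhp->rdev, &stag, php->pdid);  /* assumed helper */
        if (ret) {
            kfree(mhp);
            return ERR_PTR(ret);
        }
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmw.rkey = stag;      /* a window is addressed by rkey only */
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
            deallocate_window(&rhp->rdev, mhp->attr.stag);
            kfree(mhp);
            return ERR_PTR(-ENOMEM);
        }
        CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid,
            mhp, stag);
        return &(mhp->ibmw);
    }
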
597 struct c4iw_mw *mhp;
600 mhp = to_c4iw_mw(mw);
601 rhp = mhp->rhp;
604 deallocate_window(&rhp->rdev, mhp->attr.stag);
605 kfree(mhp);
607 mhp);
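
Window deallocation (lines 597-607) mirrors allocation in reverse. A sketch, where remove_handle() is the assumed counterpart to insert_handle():

    int c4iw_dealloc_mw(struct ib_mw *mw)  /* assumed signature */
    {
        struct c4iw_mw *mhp = to_c4iw_mw(mw);
        struct c4iw_dev *rhp = mhp->rhp;
        u32 mmid = mhp->attr.stag >> 8;

        remove_handle(rhp, &rhp->mmidr, mmid);  /* assumed helper */
        deallocate_window(&rhp->rdev, mhp->attr.stag);
        kfree(mhp);
        return 0;
    }
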
617 struct c4iw_mr *mhp;
631 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
632 if (!mhp) {
637 mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
638 length, &mhp->mpl_addr, GFP_KERNEL);
639 if (!mhp->mpl) {
643 mhp->max_mpl_len = length;
645 mhp->rhp = rhp;
646 ret = alloc_pbl(mhp, max_num_sg);
649 mhp->attr.pbl_size = max_num_sg;
651 mhp->attr.pbl_size, mhp->attr.pbl_addr);
654 mhp->attr.pdid = php->pdid;
655 mhp->attr.type = FW_RI_STAG_NSMR;
656 mhp->attr.stag = stag;
657 mhp->attr.state = 0;
659 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
660 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
665 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
666 return &(mhp->ibmr);
668 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
669 mhp->attr.pbl_addr);
671 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
672 mhp->attr.pbl_size << 3);
675 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
677 kfree(mhp);
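
Lines 617-677 are the fast-register MR allocator: it allocates the MR struct, a DMA-coherent mapped-page list (mpl) for staging page addresses, and a PBL, then leaves the stag in the invalid state (state = 0) until the first fast-register work request. A sketch with the verb signature, the mpl sizing, and the allocate_stag() helper all assumed; the unwind order follows the matches at lines 668-677:

    struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
        u32 max_num_sg)     /* assumed signature */
    {
        struct c4iw_pd *php = to_c4iw_pd(pd);
        struct c4iw_dev *rhp = php->rhp;
        struct c4iw_mr *mhp;
        u32 mmid, stag = 0;
        size_t length = roundup(max_num_sg * sizeof(u64), 32); /* assumed sizing */
        int ret;

        if (mr_type != IB_MR_TYPE_MEM_REG)  /* assumed: only fast-reg supported */
            return ERR_PTR(-EINVAL);

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
            ret = -ENOMEM;
            goto err;
        }
        /* DMA-coherent staging area for the mapped page list */
        mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
            length, &mhp->mpl_addr, GFP_KERNEL);
        if (!mhp->mpl) {
            ret = -ENOMEM;
            goto err_free_mhp;
        }
        mhp->max_mpl_len = length;

        mhp->rhp = rhp;
        ret = alloc_pbl(mhp, max_num_sg);
        if (ret)
            goto err_free_mpl;
        mhp->attr.pbl_size = max_num_sg;

        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
            mhp->attr.pbl_size, mhp->attr.pbl_addr);  /* assumed helper */
        if (ret)
            goto err_free_pbl;

        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_NSMR;
        mhp->attr.stag = stag;
        mhp->attr.state = 0;    /* invalid until a fast-register WR runs */
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
            ret = -ENOMEM;
            goto err_dereg;
        }
        return &(mhp->ibmr);

    err_dereg:
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
    err_free_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
            mhp->attr.pbl_size << 3);
    err_free_mpl:
        dma_free_coherent(rhp->ibdev.dma_device,
            mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
    err_free_mhp:
        kfree(mhp);
    err:
        return ERR_PTR(ret);
    }
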
683 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
685 if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
688 mhp->mpl[mhp->mpl_len++] = addr;
696 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
698 mhp->mpl_len = 0;
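
c4iw_set_page() (lines 683-688) and c4iw_map_mr_sg() (lines 696-698) together implement the kernel's fast-reg mapping contract: map_mr_sg resets the mapped-page list, then lets the core walk the scatterlist and call set_page once per device-sized page. A sketch, assuming the standard ib_sg_to_pages() helper is the walker:

    static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
    {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        /* refuse to overflow the PBL sized at alloc_mr time */
        if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
            return -ENOMEM;

        mhp->mpl[mhp->mpl_len++] = addr;
        return 0;
    }

    int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
        int sg_nents, unsigned int *sg_offset)  /* assumed signature */
    {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        mhp->mpl_len = 0;   /* restart the mapped-page list */
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
    }

Since ib_sg_to_pages() returns the number of pages it placed, a short return tells the caller the MR could not cover the whole scatterlist.
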
707 struct c4iw_mr *mhp;
712 mhp = to_c4iw_mr(ib_mr);
713 rhp = mhp->rhp;
714 mmid = mhp->attr.stag >> 8;
716 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
717 mhp->attr.pbl_addr);
718 if (mhp->attr.pbl_size)
719 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
720 mhp->attr.pbl_size << 3);
721 if (mhp->kva)
722 kfree((void *) (unsigned long) mhp->kva);
723 if (mhp->umem)
724 ib_umem_release(mhp->umem);
725 CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
726 kfree(mhp);
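
Deregistration (lines 707-726) runs the teardown in the reverse of the allocation paths above and serves all three MR flavors, freeing only what each flavor allocated: the pbl_size, kva, and umem frees are all conditional. A sketch, with remove_handle() and the conditional mpl free assumed from the alloc_mr path:

    int c4iw_dereg_mr(struct ib_mr *ib_mr)
    {
        struct c4iw_dev *rhp;
        struct c4iw_mr *mhp;
        u32 mmid;

        mhp = to_c4iw_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);  /* assumed helper */
        if (mhp->mpl)   /* fast-reg MRs carry a DMA-coherent page list */
            dma_free_coherent(rhp->ibdev.dma_device,
                mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
            mhp->attr.pbl_addr);
        if (mhp->attr.pbl_size)     /* DMA MRs have no PBL */
            c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                mhp->attr.pbl_size << 3);
        if (mhp->kva)
            kfree((void *)(unsigned long)mhp->kva);
        if (mhp->umem)  /* user MRs unpin their pages */
            ib_umem_release(mhp->umem);
        CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
        kfree(mhp);
        return 0;
    }
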
732 struct c4iw_mr *mhp;
736 mhp = get_mhp(rhp, rkey >> 8);
737 if (mhp)
738 mhp->attr.state = 0;
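
Finally, invalidation (lines 732-738) only flips the software state bit so later work requests see the MR as invalid; the hardware stag stays allocated. A sketch, with the lock around the mmid lookup assumed:

    void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
    {
        struct c4iw_mr *mhp;
        unsigned long flags;

        spin_lock_irqsave(&rhp->lock, flags);  /* assumed: lookups are locked */
        mhp = get_mhp(rhp, rkey >> 8);
        if (mhp)
            mhp->attr.state = 0;    /* mark invalid; stag remains allocated */
        spin_unlock_irqrestore(&rhp->lock, flags);
    }

Compare finish_mem_reg() above, which sets state back to 1: the pair implements the valid/invalid toggle driven by fast-register and local-invalidate work requests.
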