/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c 331769 2018-03-30 18:06:29Z hselasky $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

int use_dsgl = 1;
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
}

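/*
 * Write 'len' bytes of 'data' into adapter memory at 32-byte-unit address
 * 'addr' using ULP_TX MEMIO work requests carrying immediate data.  Each
 * work request holds at most C4IW_MAX_INLINE_SIZE (96) bytes, i.e. three
 * T4_ULPTX_MIN_IO (32-byte) units, which is why the destination address
 * advances by i * 3 units per iteration.  For example, len = 200 needs
 * num_wqe = DIV_ROUND_UP(200, 96) = 3 work requests carrying 96, 96, and
 * 8 bytes, the last padded out to 32.  A NULL 'data' zeroes the region.
 * Only the final work request asks for a completion (F_FW_WR_COMPL); the
 * caller then blocks in c4iw_wait_for_reply() until firmware acks it.
 */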
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));

	cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
		    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (-ENOMEM);
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo =
			    (__force __be64)(unsigned long)&wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
		    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(
		    DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			    (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}

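/*
 * A stag is built as (stag_idx << 8) | 8-bit key: the upper 24 bits index
 * the TPT, the low byte is a consumer key drawn from a rotating counter.
 * Each TPT entry is a 32-byte fw_ri_tpte, so the stag stats below are kept
 * in bytes (+/- 32 per entry) and the entry lands stag_idx 32-byte units
 * past vres.stag.start.  A non-zero reset_tpt_entry zeroes the entry and
 * returns the stag index to the resource table.
 */
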
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
			FW_RI_VA_BASED_TO)) |
		    V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5),
	    sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

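/*
 * Write a physical-buffer-list to adapter memory.  pbl_addr is a byte
 * offset in the PBL region, converted to the adapter's 32-byte addressing
 * by the >> 5; each PBL entry is an 8-byte DMA address, so the byte count
 * is pbl_size << 3 (e.g. a 4-entry list writes 32 bytes).
 */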
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

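/*
 * Register a user memory region: pin it with ib_umem_get(), translate the
 * resulting scatterlist into 8-byte PBL entries, and finish with a TPT
 * write.  Entries are staged through a single scratch page and flushed
 * with write_pbl() whenever it fills (PAGE_SIZE / 8 = 512 entries on 4KB
 * pages).  The TPT page-size field is log2(page size) - 12, i.e. 0 for
 * 4KB pages.
 */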
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
			    mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev, pages,
				    mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

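/*
 * Fast-register (FRMR) support.  c4iw_alloc_mr() reserves a stag and PBL
 * and DMA-allocates a page list (mpl) sized for max_num_sg 8-byte entries,
 * rounded up to 32 bytes.  c4iw_map_mr_sg() resets mpl_len and lets
 * ib_sg_to_pages() call c4iw_set_page() once per page to fill the list;
 * the list reaches hardware later, when the fast-register work request is
 * posted.  The MR is created with state 0 (invalid) until then.
 */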
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(
		    rhp->rdev.adap->params.ulptx_memwrite_dsgl && use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(rhp->ibdev.dma_device,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	spin_lock_irqsave(&rhp->lock, flags);
	mhp = get_mhp(rhp, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	spin_unlock_irqrestore(&rhp->lock, flags);
}
#endif