/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/iw_cxgbe/mem.c 346923 2019-04-29 20:10:28Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024

static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
}
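
/*
 * Write a 32B-aligned region of adapter memory with a single ULP_TX
 * work request carrying one DSGL entry.  "data" is a bus address the
 * caller has already DMA-mapped.  The WR is issued on control queue 0;
 * if "wait" is set it requests a completion and we block until the
 * firmware reply arrives.
 */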
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data, int wait)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);

	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
	if (wr == NULL)
		return -ENOMEM;
	ulpmc = wrtod(wr);

	memset(ulpmc, 0, wr_len);
	INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
	ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
	    (wait ? F_FW_WR_COMPL : 0));
	ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
	ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
	ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) |
	    V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
	ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
	ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
	ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));

	sgl = (struct ulptx_sgl *)(ulpmc + 1);
	sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64((u64)data);

	t4_wrq_tx(sc, wr);

	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}
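
/*
 * Write adapter memory with the payload carried as immediate data,
 * at most C4IW_MAX_INLINE_SIZE bytes per work request.  A NULL "data"
 * pointer zeroes the target memory.  Only the last WR requests a
 * completion, and we wait for that single reply.
 */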
static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));

	cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
		    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
		if (wr == NULL)
			return -ENOMEM;
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo =
			    (__force __be64)(unsigned long) &wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
		    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
		    16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			    (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}
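
/*
 * DMA-map "data" and write it to adapter memory in chunks of up to
 * T4_ULPTX_MAX_DMA bytes using _c4iw_write_mem_dma_aligned().  Any
 * remainder no larger than inline_threshold is written through the
 * inline path instead.
 */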
static int
_c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(rhp->ibdev.dma_device, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(rhp->ibdev.dma_device, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			/* Trim the chunk down to a T4_ULPTX_MIN_IO multiple. */
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
		    (void *)daddr, !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data = (u64 *)data + dmalen;
		daddr = daddr + dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	dma_unmap_single(rhp->ibdev.dma_device, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data)
{
	if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
				log(LOG_ERR, "%s: dma map "
				    "failure (non fatal)\n", __func__);
				return _c4iw_write_mem_inline(rdev, addr, len,
				    data);
			} else
				return 0;
		} else
			return _c4iw_write_mem_inline(rdev, addr, len, data);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data);
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
					   FW_RI_VA_BASED_TO))|
		    V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5),
	    sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
	    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
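
/*
 * Allocate a DMA MR for the PD: a TPT entry with no PBL that covers
 * the entire address space (va_fbo 0, length ~0ULL), with permissions
 * translated from the ib access flags.
 */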
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}
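
/*
 * Register a user memory region: pin the user pages with
 * ib_umem_get(), allocate a PBL sized for the mapped scatterlist
 * entries, stream the page DMA addresses into adapter memory one
 * PAGE_SIZE batch at a time via write_pbl(), then write the TPT
 * entry through register_mem().
 */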
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
			    mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
				    pages,
				    mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;

			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}
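
/*
 * Allocate an NSMR for fast-register work requests.  The mapped page
 * list lives in DMA-coherent memory so c4iw_map_mr_sg() can fill it
 * in directly via c4iw_set_page(); the stag and PBL are reserved here
 * but the MR starts out in the invalid state (attr.state == 0).
 */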
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(
		    rhp->rdev.adap->params.ulptx_memwrite_dsgl && use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
	    length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(rhp->ibdev.dma_device,
	    mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
		    mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	spin_lock_irqsave(&rhp->lock, flags);
	mhp = get_mhp(rhp, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	spin_unlock_irqrestore(&rhp->lock, flags);
}
#endif