/* mr.c revision 267654 */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

/*
 * Memory Protection Table (MPT) entry as consumed by the HCA firmware
 * (passed via SW2HW_MPT / read back via HW2SW_MPT).
 *
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mlx4_mpt_entry {
	__be32 flags;
	__be32 qpn;
	__be32 key;
	__be32 pd_flags;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 win_cnt;
	u8	reserved1;
	u8	flags2;
	u8	reserved2;
	u8	mtt_rep;
	__be64 mtt_seg;
	__be32 mtt_sz;
	__be32 entity_size;
	__be32 first_byte_offset;
} __attribute__((packed));

/* Bits for mlx4_mpt_entry.flags */
#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 << 9)
#define MLX4_MPT_FLAG_REGION	    (1 << 8)

/* Bits for mlx4_mpt_entry.pd_flags */
#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

/* Bits for mlx4_mpt_entry.flags2 */
#define MLX4_MPT_FLAG2_FBO_EN	    (1 << 7)

/*
 * Ownership byte written through the first byte of the mapped MPT entry
 * (see mlx4_map_phys_fmr_fbo / mlx4_fmr_unmap): SW-owned entries are
 * ignored by hardware, HW-owned entries are live.
 */
#define MLX4_MPT_STATUS_SW	0xF0
#define MLX4_MPT_STATUS_HW	0x00

/*
 * Allocate 2^order contiguous MTT segments from the buddy allocator.
 * Returns the index of the first segment, or -1 (as u32 — callers
 * compare against -1) if no free run of that size exists.
 */
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	/* Find the smallest order >= 'order' with a free block. */
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/*
	 * If we took a block bigger than requested, split it down,
	 * returning each unused buddy half to the free lists.
	 */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	/* Convert block index at 'order' into an absolute segment index. */
	seg <<= order;

	return seg;
}

/*
 * Return a block of 2^order segments starting at 'seg' to the buddy
 * allocator, merging with its buddy at each order while possible.
 */
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

/*
 * Initialize a buddy allocator covering 2^max_order segments.  On entry
 * the whole range is free as a single block of order max_order.
 * Returns 0 or -ENOMEM.
 */
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	/*
	 * NOTE(review): sizeof (int *) over-allocates per entry on 64-bit
	 * (num_free holds ints, not pointers); harmless but presumably
	 * meant to be sizeof *buddy->num_free.
	 */
	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	/* kfree(NULL) is a no-op, so freeing unallocated slots is safe. */
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

/* Release all memory owned by a buddy allocator. */
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

/*
 * Allocate 2^order MTT segments and make sure the backing ICM pages for
 * them are mapped.  Returns the first segment index, or -1 on failure.
 */
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
	if (seg == -1)
		return -1;

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
				 seg + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
		return -1;
	}

	return seg;
}

/*
 * Initialize an MTT for 'npages' pages of size 2^page_shift.  npages == 0
 * marks the MTT as unused (order -1, i.e. a physically-contiguous region
 * that needs no translation entries).  Returns 0 or -ENOMEM.
 */
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		mtt->order      = -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	} else
		mtt->page_shift = page_shift;

	/* Smallest order such that 2^order segments hold npages entries. */
	for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
		++mtt->order;

	mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->first_seg == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

/* Free the segments and ICM references held by an MTT (no-op if unused). */
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	if (mtt->order < 0)
		return;

	mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
			     mtt->first_seg + (1 << mtt->order) - 1);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

/* Byte offset of this MTT's first segment within the MTT table. */
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

/*
 * MPT hardware index <-> memory key conversions: the key is the index
 * rotated left by 8 bits (low byte of the key carries the variant part).
 * The two functions are inverses of each other.
 */
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

/* Pass an MPT entry to firmware, transferring ownership SW -> HW. */
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
			MLX4_CMD_TIME_CLASS_B);
}

/*
 * Reclaim an MPT entry from firmware (HW -> SW).  A NULL mailbox sets the
 * command's out_is_imm modifier (!mailbox) and discards the entry contents.
 */
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
}

/*
 * Reserve 'cnt' consecutive MPT indices aligned to 'align'.  On success
 * the base index is stored in *base_mridx.  Returns 0 or -ENOMEM.
 */
int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align, u32 *base_mridx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 mridx;

	mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
	if (mridx == -1)
		return -ENOMEM;

	*base_mridx = mridx;
	return 0;

}
EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);

/* Release a range previously reserved by mlx4_mr_reserve_range(). */
void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_release_range);

/*
 * Fill in an MR structure for an already-reserved MPT index 'mridx' and
 * allocate its MTT.  The MR is left disabled; mlx4_mr_enable() installs
 * it in hardware.  Returns 0 or a negative errno from mlx4_mtt_init().
 */
int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
			   u64 iova, u64 size, u32 access, int npages,
			   int page_shift, struct mlx4_mr *mr)
{
	mr->iova       = iova;
	mr->size       = size;
	mr->pd	       = pd;
	mr->access     = access;
	mr->enabled    = 0;
	mr->key	       = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);

/*
 * Allocate an MPT index from the bitmap and initialize an MR on it.
 * Returns 0, -ENOMEM if no index is free, or the mlx4_mr_alloc_reserved()
 * error (in which case the index is returned to the bitmap).
 */
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 index;
	int err;

	index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

/*
 * Tear down an MR whose MPT index is managed by the caller: pull the MPT
 * back from hardware if it was enabled, then free the MTT.  The MPT index
 * itself is NOT returned to the bitmap (see mlx4_mr_free for that).
 */
void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	}

	mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);

/* Fully free an MR, including returning its MPT index to the bitmap. */
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	mlx4_mr_free_reserved(dev, mr);
	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

/*
 * Install an MR in hardware: take a reference on its dMPT ICM entry,
 * build the MPT entry in a command mailbox, and execute SW2HW_MPT.
 * An MTT of order >= 0 with page_shift == 0 is programmed as a
 * fast-register MR in the FREE state; order < 0 means a physical
 * (untranslated) region.  Returns 0 or a negative errno; on failure all
 * acquired resources are released via the goto cleanup chain.
 */
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_seg = 0;
	} else {
		mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
						   dev->caps.mtts_per_seg);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}

	mr->enabled = 1;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

/*
 * Write 'npages' MTT entries starting at start_index directly into the
 * mapped MTT table and DMA-sync them.  The chunk must lie within one
 * page and start on an MTT-segment boundary; returns 0, -EINVAL on a
 * misaligned request, or -ENOMEM if the table page is not mapped.
 */
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof (u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
		return -EINVAL;

	if (start_index & (dev->caps.mtts_per_seg - 1))
		return -EINVAL;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
			       s / dev->caps.mtt_entry_sz, &dma_handle);
	if (!mtts)
		return -ENOMEM;

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

/*
 * Write an arbitrary run of MTT entries by splitting it into
 * page-at-a-time chunks for mlx4_write_mtt_chunk().  -EINVAL for an
 * MTT with no translation entries (order < 0).
 */
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	int chunk;
	int err;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;

		npages  -= chunk;
		start_index += chunk;
		page_list   += chunk;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

/*
 * Populate an MTT from an mlx4_buf: build a temporary DMA-address list
 * (direct mapping or per-page list, depending on how the buf was
 * allocated) and hand it to mlx4_write_mtt().
 */
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->direct.map)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

/*
 * Set up the MR table: the MPT index bitmap (with reserved MRWs held
 * back) and the MTT buddy allocator, then pre-allocate the reserved MTT
 * range.  num_mpts must be a power of two.  Returns 0 or a negative
 * errno, unwinding partial initialization on failure.
 */
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtt_segs));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

/* Tear down what mlx4_init_mr_table() set up. */
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

/*
 * Validate an FMR map request: page count within max_pages, iova aligned
 * to the FMR page size, and remap budget (max_maps) not exhausted.
 * Returns 0 or -EINVAL.
 */
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	int i, page_mask;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	/*
	 * NOTE(review): deliberately compiled out with if (0); as written
	 * the test uses ~page_mask, which would reject any nonzero page
	 * address rather than check alignment — verify before enabling.
	 */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

/*
 * (Re)map an FMR onto a new page list, optionally with a first-byte
 * offset (fbo) and explicit length, producing fresh lkey/rkey values.
 * Sequence: mark the MPT SW-owned, write the MTT entries and sync them
 * for DMA, update the MPT fields, then flip the MPT back to HW-owned —
 * the wmb() calls order each step's stores before the next.
 * same_key != 0 keeps the current key instead of advancing it.
 * Returns 0 or the mlx4_check_fmr() error.
 */
int mlx4_map_phys_fmr_fbo(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
			  u64 *page_list, int npages, u64 iova, u32 fbo,
			  u32 len, u32 *lkey, u32 *rkey, int same_key)
{
	u32 key;
	int i, err;

	err = mlx4_check_fmr(fmr, page_list, npages, iova);
	if (err)
		return err;

	++fmr->maps;

	key = key_to_hw_index(fmr->mr.key);
	/* Advance the key's variant bits by stepping past the MPT table. */
	if (!same_key)
		key += dev->caps.num_mpts;
	*lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

	/* Make sure MPT status is visible before writing MTT entries */
	wmb();

	for (i = 0; i < npages; ++i)
		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
			npages * sizeof(u64), DMA_TO_DEVICE);

	fmr->mpt->key    = cpu_to_be32(key);
	fmr->mpt->lkey   = cpu_to_be32(key);
	fmr->mpt->length = cpu_to_be64(len);
	fmr->mpt->start  = cpu_to_be64(iova);

	fmr->mpt->first_byte_offset = cpu_to_be32(fbo & 0x001fffff);
	fmr->mpt->flags2 = (fbo ? MLX4_MPT_FLAG2_FBO_EN : 0);

	/* Make sure MTT entries are visible before setting MPT status */
	wmb();

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

	/* Make sure MPT status is visible before consumer can use FMR */
	wmb();

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr_fbo);

/*
 * Plain FMR map: no fbo, length derived from the page count.
 * NOTE(review): len is u32, so npages << page_shift silently truncates
 * above 4 GB — presumably callers stay below that; confirm.
 */
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
		      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
	u32 len = npages * (1ull << fmr->page_shift);

	return mlx4_map_phys_fmr_fbo(dev, fmr, page_list, npages, iova, 0,
				     len, lkey, rkey, 0);
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

/*
 * Allocate an FMR: validate page_shift against device capabilities,
 * require all MTTs to fit in one page, allocate the underlying MR, and
 * cache a direct pointer to its MTT entries for fast remapping.
 * Returns 0, -EINVAL, or -ENOMEM.
 */
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 mtt_seg;
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	/* NOTE(review): mtt_seg is computed but never used here. */
	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.first_seg,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

/*
 * Same as mlx4_fmr_alloc() but on a caller-reserved MPT index 'mridx'
 * (pairs with mlx4_mr_reserve_range / mlx4_fmr_free_reserved).
 */
int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
			    u32 pd, u32 access, int max_pages,
			    int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 mtt_seg;
	int err = -ENOMEM;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages  = max_pages;
	fmr->max_maps   = max_maps;
	fmr->maps = 0;

	err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
				     page_shift, &fmr->mr);
	if (err)
		return err;

	/* NOTE(review): mtt_seg is computed but never used here. */
	mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.first_seg,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free_reserved(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);

/*
 * Enable the FMR's MR in hardware and cache a direct pointer to its
 * mapped dMPT entry for the fast status/key updates done while mapping.
 * Returns 0, an mlx4_mr_enable() error, or -ENOMEM.
 */
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

/*
 * Unmap an FMR by handing its MPT back to software ownership.  No-op if
 * the FMR has no active maps.  lkey/rkey parameters are accepted but not
 * written here.
 */
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	if (!fmr->maps)
		return;

	fmr->maps = 0;

	*(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

/*
 * Free an FMR (must be unmapped first — -EBUSY otherwise).  enabled is
 * cleared before mlx4_mr_free() so no HW2SW_MPT command is issued.
 */
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	fmr->mr.enabled = 0;
	mlx4_mr_free(dev, &fmr->mr);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

/*
 * Variant of mlx4_fmr_free() for FMRs on caller-reserved MPT indices;
 * the index itself stays reserved (see mlx4_mr_release_range).
 */
int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	fmr->mr.enabled = 0;
	mlx4_mr_free_reserved(dev, &fmr->mr);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);

769int mlx4_SYNC_TPT(struct mlx4_dev *dev) 770{ 771 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000); 772} 773EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); 774