/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		spin_lock_irq(&rds_ibdev->spinlock);
		list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				spin_unlock_irq(&rds_ibdev->spinlock);
				return rds_ibdev;
			}
		}
		spin_unlock_irq(&rds_ibdev->spinlock);
	}

	return NULL;
}

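/*
 * The helpers below maintain the per-device list of bound IP addresses
 * that rds_ib_get_device() searches; updates are made under
 * rds_ibdev->spinlock.
 */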
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr, *next;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del(&i_ipaddr->list);
			kfree(i_ipaddr);
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock_irq(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

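/*
 * Set up the FMR pool for a device: initialise the free/drop/clean
 * lists and the flush worker, and size the pool from the device's
 * max_fmrs and fmr_max_remaps limits.
 */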
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

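/*
 * Allocate an FMR: prefer reusing a clean MR from the pool; otherwise
 * allocate a fresh one as long as we stay under max_items.  If the
 * pool is full, flush it and retry a couple of times before giving up
 * with -EAGAIN.
 */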
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

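/*
 * DMA-map the caller's scatterlist and program the FMR with the
 * resulting page list.  Only the first entry may start, and only the
 * last entry may end, off a page boundary; anything else is rejected
 * with -EINVAL.
 */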
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - the MR was remapped, so we can safely tear
	 * down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

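/*
 * Tear down an MR's current mapping: undo the DMA mapping, then dirty
 * and unpin the pages it covered.  The BUG_ON(in_interrupt()) below
 * guards the page release, which is not safe from interrupt context.
 */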
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			BUG_ON(in_interrupt());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

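/*
 * Freeing an MR just returns it to the pool: it goes on the drop list
 * once it has hit max_maps remaps, otherwise on the free list, and a
 * pool flush is kicked off when too many pages are pinned or too many
 * MRs are dirty.  The mapping itself is torn down later by the flush.
 */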
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}