/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/ofed/drivers/infiniband/core/ib_umem_odp.c 337096 2018-08-02 08:33:51Z hselasky $");

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifiers_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase will notify the QP page fault that
		 * the page that is going to be mapped in the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}
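
/*
 * Illustrative note (not compiled) on how the two counters above are meant
 * to be consumed: a page-fault path samples notifiers_seq before faulting
 * and later re-checks it under umem_mutex via ib_umem_mmu_notifier_retry()
 * from <rdma/ib_umem_odp.h>, which amounts to roughly
 *
 *	retry = !umem->odp_data->mn_counters_active ||
 *		umem->odp_data->notifiers_count ||
 *		umem->odp_data->notifiers_seq != sampled_seq;
 *
 * so a fault either backs off while a notifier is still running
 * (notifiers_count > 0) or detects one that has already completed
 * (notifiers_seq changed) instead of installing a stale translation.
 */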

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;
	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_page            = ib_umem_notifier_invalidate_page,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
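
/*
 * Illustrative call sequence (an assumption based on the generic
 * mmu_notifier contract, not something this file enforces): when the core mm
 * tears down PTEs that overlap a registered ODP umem, it brackets the
 * operation as
 *
 *	ib_umem_notifier_invalidate_range_start(mn, mm, start, end);
 *	... PTEs in [start, end) are unmapped or changed ...
 *	ib_umem_notifier_invalidate_range_end(mn, mm, start, end);
 *
 * Between the two calls the context-wide notifier_count is raised, and every
 * overlapping umem with active private counters has notifiers_count > 0, so
 * ODP page faults on those ranges fail the retry check until _range_end runs
 * and notifiers_seq has been advanced.
 */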

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
	int ret_val;
	pid_t our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
	if (!umem->odp_data->page_list) {
		ret_val = -ENOMEM;
		goto out_odp_data;
	}

	umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					   sizeof(*umem->odp_data->dma_list));
	if (!umem->odp_data->dma_list) {
		ret_val = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lockdep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not grasping downgrade_write correctly.
		 */
		ret_val = mmu_notifier_register(&context->mn, mm);
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}

void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead, the notifiers were
			 * removed already.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead, the notifiers were
			 * removed already.
			 */
			goto out_put_task;
		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}
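
/*
 * Layout of a dma_list entry, as used by the functions below (illustrative,
 * derived from the code itself): the low bits of the page-aligned DMA
 * address are reused for permission flags, so
 *
 *	entry    = dma_addr | access_mask;	(e.g. ODP_WRITE_ALLOWED_BIT)
 *	dma_addr = entry & ODP_DMA_ADDR_MASK;
 *	writable = entry & ODP_WRITE_ALLOWED_BIT;
 *
 * ib_umem_odp_map_dma_single_page() stores such entries and
 * ib_umem_odp_unmap_dma_pages() decodes them again.
 */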

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		u64 base_virt_addr,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			base_virt_addr + (page_index * PAGE_SIZE),
			base_virt_addr + ((page_index + 1) * PAGE_SIZE),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;
	struct page **local_page_list = NULL;
	u64 off;
	int j, k, ret = 0, start_idx, npages = 0;
	u64 base_virt_addr;
	unsigned int flags = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	off = user_virt & (~PAGE_MASK);
	user_virt = user_virt & PAGE_MASK;
	base_virt_addr = user_virt;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages =
			min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
			      PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
					       user_virt, gup_num_pages,
					       flags, local_page_list, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		user_virt += npages << PAGE_SHIFT;
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; ++j) {
			ret = ib_umem_odp_map_dma_single_page(
				umem, k, base_virt_addr, local_page_list[j],
				access_mask, current_seq);
			if (ret < 0)
				break;
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
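
/*
 * Typical caller-side pattern for the function above (illustrative sketch,
 * not compiled; names other than the exported functions and the odp_data
 * fields are placeholders for driver code):
 *
 *	unsigned long seq;
 *	int npages;
 *
 *	seq = umem->odp_data->notifiers_seq;
 *	npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *					   access_mask, seq);
 *	if (npages == -EAGAIN)
 *		goto retry;		(an invalidation raced with us)
 *	if (npages < 0)
 *		return npages;
 *
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem, seq))
 *		program_device_translation(umem);	(placeholder)
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 */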

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);