/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/ofed/drivers/infiniband/core/ib_umem_odp.c 337096 2018-08-02 08:33:51Z hselasky $");

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

static void ib_umem_notifier_start_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                int notifiers_count = item->odp_data->notifiers_count++;

                if (notifiers_count == 0)
                        /* Initialize the completion object for waiting on
                         * notifiers. Since notifier_count is zero, no one
                         * should be waiting right now. */
                        reinit_completion(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}
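/*
 * Usage sketch (illustrative only): the notifiers_count/notifiers_seq pair
 * maintained above implements the usual mmu_notifier retry protocol.  A
 * page-fault handler snapshots notifiers_seq, maps pages, and then rechecks
 * both fields under umem_mutex before installing the mapping, roughly as the
 * ib_umem_mmu_notifier_retry() inline in <rdma/ib_umem_odp.h> does:
 *
 *      static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
 *                                                   unsigned long mmu_seq)
 *      {
 *              an invalidation is currently in flight:
 *              if (unlikely(item->odp_data->notifiers_count))
 *                      return 1;
 *              an invalidation completed since the snapshot:
 *              if (item->odp_data->notifiers_seq != mmu_seq)
 *                      return 1;
 *              return 0;
 *      }
 */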
static void ib_umem_notifier_end_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                /*
                 * This sequence increase will notify the QP page fault that
                 * the page that is going to be mapped in the spte could have
                 * been freed.
                 */
                ++item->odp_data->notifiers_seq;
                if (--item->odp_data->notifiers_count == 0)
                        complete_all(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
        atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
        int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

        if (zero_notifiers &&
            !list_empty(&context->no_private_counters)) {
                /* No currently running mmu notifiers. Now is the chance to
                 * add private accounting to all previously added umems. */
                struct ib_umem_odp *odp_data, *next;

                /* Prevent concurrent mmu notifiers from working on the
                 * no_private_counters list. */
                down_write(&context->umem_rwsem);

                /* Read the notifier_count again, with the umem_rwsem
                 * semaphore taken for write. */
                if (!atomic_read(&context->notifier_count)) {
                        list_for_each_entry_safe(odp_data, next,
                                                 &context->no_private_counters,
                                                 no_private_counters) {
                                mutex_lock(&odp_data->umem_mutex);
                                odp_data->mn_counters_active = true;
                                list_del(&odp_data->no_private_counters);
                                complete_all(&odp_data->notifier_completion);
                                mutex_unlock(&odp_data->umem_mutex);
                        }
                }

                up_write(&context->umem_rwsem);
        }
}
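/*
 * Consumer-side sketch (illustrative, mlx5-style; MMU_NOTIFIER_TIMEOUT is a
 * driver-chosen constant, not defined in this file): a fault handler that
 * loses a race with a running notifier should back off on
 * notifier_completion, which the accounting above completes once the
 * notifier count drops to zero:
 *
 *      if (ret == -EAGAIN) {
 *              unsigned long timeout =
 *                      msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 *
 *              if (!wait_for_completion_timeout(
 *                              &umem->odp_data->notifier_completion,
 *                              timeout))
 *                      pr_warn("timeout waiting for mmu notifier\n");
 *              goto retry;
 *      }
 */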
static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
                                               u64 end, void *cookie) {
        /*
         * Increase the number of notifiers running, to
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(item);
        item->odp_data->dying = 1;
        /* Make sure that the fact that the umem is dying is visible before
         * we release all pending page faults. */
        smp_wmb();
        complete_all(&item->odp_data->notifier_completion);
        item->context->invalidate_range(item, ib_umem_start(item),
                                        ib_umem_end(item));
        return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
                                      ULLONG_MAX,
                                      ib_umem_notifier_release_trampoline,
                                      NULL);
        up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
                                      u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, start + PAGE_SIZE);
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
                                      address + PAGE_SIZE,
                                      invalidate_page_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                             u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, end);
        return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_start_trampoline, NULL);
        up_read(&context->umem_rwsem);
}
static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_end_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                = ib_umem_notifier_release,
        .invalidate_page        = ib_umem_notifier_invalidate_page,
        .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end   = ib_umem_notifier_invalidate_range_end,
};
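/*
 * These callbacks only do real work when the consuming driver has installed
 * an invalidate_range hook on the ib_ucontext; otherwise each handler above
 * bails out early.  An illustrative wiring (the hook name is mlx5's; other
 * drivers supply their own):
 *
 *      in the driver's alloc_ucontext path, when ODP is supported:
 *      context->invalidate_range = &mlx5_ib_invalidate_range;
 *
 * The hook receives the umem and a [start, end) virtual range, and must make
 * the device forget those translations before returning.
 */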
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
        int ret_val;
        pid_t our_pid;
        struct mm_struct *mm = get_task_mm(current);

        if (!mm)
                return -EINVAL;

        /* Prevent creating ODP MRs in child processes */
        rcu_read_lock();
        our_pid = get_pid(task_pid_group_leader(current));
        rcu_read_unlock();
        put_pid(our_pid);
        if (context->tgid != our_pid) {
                ret_val = -EINVAL;
                goto out_mm;
        }

        umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
        if (!umem->odp_data) {
                ret_val = -ENOMEM;
                goto out_mm;
        }
        umem->odp_data->umem = umem;

        mutex_init(&umem->odp_data->umem_mutex);

        init_completion(&umem->odp_data->notifier_completion);

        umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
                                            sizeof(*umem->odp_data->page_list));
        if (!umem->odp_data->page_list) {
                ret_val = -ENOMEM;
                goto out_odp_data;
        }

        umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
                                           sizeof(*umem->odp_data->dma_list));
        if (!umem->odp_data->dma_list) {
                ret_val = -ENOMEM;
                goto out_page_list;
        }

        /*
         * When using MMU notifiers, we will get a
         * notification before the "current" task (and MM) is
         * destroyed. We use the umem_rwsem semaphore to synchronize.
         */
        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)) ||
            context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
                         &context->no_private_counters);
        downgrade_write(&context->umem_rwsem);

        if (context->odp_mrs_count == 1) {
                /*
                 * Note that at this point, no MMU notifier is running
                 * for this context!
                 */
                atomic_set(&context->notifier_count, 0);
                INIT_HLIST_NODE(&context->mn.hlist);
                context->mn.ops = &ib_umem_notifiers;
                /*
                 * Lockdep detects a false positive for mmap_sem vs.
                 * umem_rwsem, due to not grasping downgrade_write correctly.
                 */
                ret_val = mmu_notifier_register(&context->mn, mm);
                if (ret_val) {
                        pr_err("Failed to register mmu_notifier %d\n", ret_val);
                        ret_val = -EBUSY;
                        goto out_mutex;
                }
        }

        up_read(&context->umem_rwsem);

        /*
         * Note that doing an mmput can trigger a notifier for the relevant
         * mm. If the notifier is called while we hold the umem_rwsem, this
         * will cause a deadlock. Therefore, we release the reference only
         * after we released the semaphore.
         */
        mmput(mm);
        return 0;

out_mutex:
        up_read(&context->umem_rwsem);
        vfree(umem->odp_data->dma_list);
out_page_list:
        vfree(umem->odp_data->page_list);
out_odp_data:
        kfree(umem->odp_data);
out_mm:
        mmput(mm);
        return ret_val;
}
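/*
 * Caller sketch (illustrative): ib_umem_odp_get() is normally reached from
 * ib_umem_get() when the MR was registered with IB_ACCESS_ON_DEMAND, along
 * these lines (paraphrased, not a verbatim quote of that function):
 *
 *      if (access & IB_ACCESS_ON_DEMAND) {
 *              ret = ib_umem_odp_get(context, umem);
 *              if (ret) {
 *                      kfree(umem);
 *                      return ERR_PTR(ret);
 *              }
 *              return umem;    (no pages are pinned up front)
 *      }
 */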
void ib_umem_odp_release(struct ib_umem *umem)
{
        struct ib_ucontext *context = umem->context;

        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
                                    ib_umem_end(umem));

        down_write(&context->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_remove(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        context->odp_mrs_count--;
        if (!umem->odp_data->mn_counters_active) {
                list_del(&umem->odp_data->no_private_counters);
                complete_all(&umem->odp_data->notifier_completion);
        }

        /*
         * Downgrade the lock to a read lock. This ensures that the notifiers
         * (which take the rwsem for reading) will be able to finish, and we
         * will be able to eventually obtain the mmu notifiers SRCU. Note
         * that since we are doing it atomically, no other user could register
         * and unregister while we do the check.
         */
        downgrade_write(&context->umem_rwsem);
        if (!context->odp_mrs_count) {
                struct task_struct *owning_process = NULL;
                struct mm_struct *owning_mm = NULL;

                owning_process = get_pid_task(context->tgid,
                                              PIDTYPE_PID);
                if (owning_process == NULL)
                        /*
                         * The process is already dead; the notifiers were
                         * removed already.
                         */
                        goto out;

                owning_mm = get_task_mm(owning_process);
                if (owning_mm == NULL)
                        /*
                         * The process' mm is already dead; the notifiers
                         * were removed already.
                         */
                        goto out_put_task;
                mmu_notifier_unregister(&context->mn, owning_mm);

                mmput(owning_mm);

out_put_task:
                put_task_struct(owning_process);
        }
out:
        up_read(&context->umem_rwsem);

        vfree(umem->odp_data->dma_list);
        vfree(umem->odp_data->page_list);
        kfree(umem->odp_data);
        kfree(umem);
}
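/*
 * Teardown sketch (illustrative): drivers do not call ib_umem_odp_release()
 * directly; ib_umem_release() dispatches here for ODP umems, roughly:
 *
 *      void ib_umem_release(struct ib_umem *umem)
 *      {
 *              if (umem->odp_data) {
 *                      ib_umem_odp_release(umem);
 *                      return;
 *              }
 *              (... regular, pinned-umem teardown ...)
 *      }
 *
 * Per the comment at the top of the function, the device must already have
 * been told to stop using the MR by the time this runs.
 */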
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem *umem,
                int page_index,
                u64 base_virt_addr,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int stored_page = 0;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
         * early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem->odp_data->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
                umem->odp_data->page_list[page_index] = page;
                stored_page = 1;
        } else if (umem->odp_data->page_list[page_index] == page) {
                umem->odp_data->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem->odp_data->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);

        if (remove_existing_mapping && umem->context->invalidate_range) {
                invalidate_page_trampoline(
                        umem,
                        base_virt_addr + (page_index * PAGE_SIZE),
                        base_virt_addr + ((page_index + 1) * PAGE_SIZE),
                        NULL);
                ret = -EAGAIN;
        }

        return ret;
}
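/*
 * Encoding note with a small sketch (illustrative): each dma_list entry
 * packs the page-aligned bus address together with the access bits in its
 * low bits, using the masks from <rdma/ib_umem_odp.h>.  Decomposing an
 * entry looks like:
 *
 *      dma_addr_t entry = umem->odp_data->dma_list[idx];
 *      dma_addr_t bus_addr = entry & ODP_DMA_ADDR_MASK;
 *      int writable = !!(entry & ODP_WRITE_ALLOWED_BIT);
 *
 * This works because the mapping is PAGE_SIZE-aligned, so the low bits of
 * the bus address are known to be zero.
 */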
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 *
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                              u64 access_mask, unsigned long current_seq)
{
        struct task_struct *owning_process = NULL;
        struct mm_struct *owning_mm = NULL;
        struct page **local_page_list = NULL;
        u64 off;
        int j, k, ret = 0, start_idx, npages = 0;
        u64 base_virt_addr;
        unsigned int flags = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem) ||
            user_virt + bcnt > ib_umem_end(umem))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        off = user_virt & (~PAGE_MASK);
        user_virt = user_virt & PAGE_MASK;
        base_virt_addr = user_virt;
        bcnt += off; /* Charge for the first page offset as well. */

        owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (owning_process == NULL) {
                ret = -EINVAL;
                goto out_no_task;
        }

        owning_mm = get_task_mm(owning_process);
        if (owning_mm == NULL) {
                ret = -EINVAL;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages =
                        min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
                              PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                               user_virt, gup_num_pages,
                                               flags, local_page_list, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0)
                        break;

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                user_virt += npages << PAGE_SHIFT;
                mutex_lock(&umem->odp_data->umem_mutex);
                for (j = 0; j < npages; ++j) {
                        ret = ib_umem_odp_map_dma_single_page(
                                umem, k, base_virt_addr, local_page_list[j],
                                access_mask, current_seq);
                        if (ret < 0)
                                break;
                        k++;
                }
                mutex_unlock(&umem->odp_data->umem_mutex);

                if (ret < 0) {
                        /* Release left over pages when handling errors. */
                        for (++j; j < npages; ++j)
                                put_page(local_page_list[j]);
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        put_task_struct(owning_process);
out_no_task:
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
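/*
 * Fault-path sketch (illustrative; io_virt, bcnt and the device page-table
 * update belong to the caller): the sequence snapshot must be taken before
 * calling ib_umem_odp_map_dma_pages(), and rechecked under umem_mutex before
 * the device page table is updated.  The smp_rmb() pairs with the
 * notifiers_seq increment in ib_umem_notifier_end_account():
 *
 *      current_seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *      smp_rmb();
 *      npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *                                         access_mask, current_seq);
 *      if (npages < 0)
 *              return npages;
 *
 *      mutex_lock(&umem->odp_data->umem_mutex);
 *      if (!ib_umem_mmu_notifier_retry(umem, current_seq)) {
 *              (update the device page table from dma_list here)
 *      }
 *      mutex_unlock(&umem->odp_data->umem_mutex);
 */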
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
                                 u64 bound)
{
        int idx;
        u64 addr;
        struct ib_device *dev = umem->context->device;

        virt = max_t(u64, virt, ib_umem_start(umem));
        bound = min_t(u64, bound, ib_umem_end(umem));
        /* Note that during the run of this function, the
         * notifiers_count of the MR is > 0, preventing any racing
         * faults from completing. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
        for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
                        dma_addr_t dma = umem->odp_data->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        /* on demand pinning support */
                        if (!umem->context->invalidate_range)
                                put_page(page);
                        umem->odp_data->page_list[idx] = NULL;
                        umem->odp_data->dma_list[idx] = 0;
                }
        }
        mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
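/*
 * Invalidation sketch (illustrative; driver_invalidate_range is a
 * hypothetical name, mlx5 for instance calls its hook
 * mlx5_ib_invalidate_range): a driver's invalidate_range hook typically
 * ends by handing the affected window to ib_umem_odp_unmap_dma_pages():
 *
 *      static void driver_invalidate_range(struct ib_umem *umem,
 *                                          unsigned long start,
 *                                          unsigned long end)
 *      {
 *              (1. zap the device translations covering [start, end))
 *              (2. flush, so the HW can no longer use the stale entries)
 *              ib_umem_odp_unmap_dma_pages(umem, start, end);
 *      }
 */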