/* ib_umem.c, revision 331769 */
/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define	LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

#include <sys/priv.h>

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->nmap,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
	return;
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * If access flags indicate ODP memory, avoid pinning. Instead, stores
 * the mm for future page fault handling in conjunction with MMU notifiers.
 *
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	struct dma_attrs dma_attrs = { 0 };
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;
	unsigned int gup_flags = FOLL_WRITE;

	if (dmasync)
		dma_attrs.flags |= DMA_ATTR_WRITE_BARRIER;

	if (!size)
		return ERR_PTR(-EINVAL);

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (priv_check(curthread, PRIV_VM_MLOCK) != 0)
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_pid(task_pid(current));
	/*
	 * We ask for writable memory if any of the following
	 * access flags are set. "Local write" and "remote write"
	 * obviously require write access. "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access &
		(IB_ACCESS_LOCAL_WRITE   | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

	if (access & IB_ACCESS_ON_DEMAND) {
		ret = ib_umem_odp_get(context, umem);
		if (ret) {
			kfree(umem);
			return ERR_PTR(ret);
		}
		return umem;
	}

	umem->odp_data = NULL;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->pinned_vm;

	cur_base = addr & PAGE_MASK;

	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     gup_flags, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* preparing for next loop */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 &dma_attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
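
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * consumer is a driver's memory-registration path.  The caller-side names
 * below (pd, start, length, access_flags) are hypothetical; the error
 * handling follows the ERR_PTR convention used by ib_umem_get() above.
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(pd->uobject->context, start, length,
 *			   access_flags, 0);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *
 *	... program the HCA with the DMA addresses in umem->sg_head ...
 *
 *	ib_umem_release(umem);
 */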

static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	if (umem->odp_data) {
		ib_umem_odp_release(umem);
		return;
	}

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	if (umem->odp_data)
		return ib_umem_num_pages(umem);

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

#ifdef __linux__
	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
				 offset + ib_umem_offset(umem));
#else
	ret = 0;
#endif
	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
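
/*
 * Usage sketch (illustrative, not part of the original file): copying the
 * start of a registered region into a kernel buffer, e.g. to parse a
 * user-supplied header.  Note that in this revision the !__linux__ branch
 * above leaves ret at 0, so on FreeBSD any nonzero length fails the
 * "ret != length" check and the call returns -EINVAL; the example below
 * would therefore only succeed on the Linux build.
 *
 *	char hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */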