/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/device.h>

#include "ipath_kernel.h"

static void __ipath_release_user_pages(struct page **p, size_t num_pages,
				       int dirty)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
			   (unsigned long) num_pages, p[i]);
		if (dirty)
			set_page_dirty_lock(p[i]);
		put_page(p[i]);
	}
}

/* call with current->mm->mmap_sem held */
static int __get_user_pages(unsigned long start_page, size_t num_pages,
			    struct page **p, struct vm_area_struct **vma)
{
	unsigned long lock_limit;
	size_t got;
	int ret;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
		PAGE_SHIFT;

	if (num_pages > lock_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
		   (unsigned long) num_pages, start_page);

	for (got = 0; got < num_pages; got += ret) {
		ret = get_user_pages(current, current->mm,
				     start_page + got * PAGE_SIZE,
				     num_pages - got, 1, 1,
				     p + got, vma);
		if (ret < 0)
			goto bail_release;
	}

	current->mm->locked_vm += num_pages;

	ret = 0;
	goto bail;

bail_release:
	__ipath_release_user_pages(p, got, 0);
bail:
	return ret;
}

/**
 * ipath_map_page - a safety wrapper around pci_map_page()
 *
 * A DMA address of 0 cannot be handed to the hardware, but some
 * architectures can legitimately return 0 from pci_map_page().  If that
 * happens, drop the mapping and map the page again so the caller never
 * sees a zero address.
 */
dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
			  unsigned long offset, size_t size, int direction)
{
	dma_addr_t phys;

	phys = pci_map_page(hwdev, page, offset, size, direction);

	if (phys == 0) {
		pci_unmap_page(hwdev, phys, size, direction);
		phys = pci_map_page(hwdev, page, offset, size, direction);
	}

	return phys;
}

/**
 * ipath_map_single - a safety wrapper around pci_map_single()
 *
 * Same idea as ipath_map_page().
 */
dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
			    int direction)
{
	dma_addr_t phys;

	phys = pci_map_single(hwdev, ptr, size, direction);

	if (phys == 0) {
		pci_unmap_single(hwdev, phys, size, direction);
		phys = pci_map_single(hwdev, ptr, size, direction);
	}

	return phys;
}

/**
 * ipath_get_user_pages - lock user pages into memory
 * @start_page: the start page
 * @num_pages: the number of pages
 * @p: the output page structures
 *
 * This function takes a given start page (a page aligned user virtual
 * address) and pins it and the following specified number of pages.  For
 * now, num_pages is always 1, but that will probably change at some point
 * (because the caller is doing expected sends on a single virtually
 * contiguous buffer, so we can do all pages at once).
 *
 * See the illustrative usage sketch at the end of this file.
 */
int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
			 struct page **p)
{
	int ret;

	down_write(&current->mm->mmap_sem);

	ret = __get_user_pages(start_page, num_pages, p, NULL);

	up_write(&current->mm->mmap_sem);

	return ret;
}

/**
 * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
 * @page: the page to lock
 * @p: the output page structure
 *
 * This is similar to ipath_get_user_pages, but it is always a single page,
 * and the page is marked as locked for I/O and shared.  This is used for
 * the user process page that contains the destination address for the
 * rcvhdrq tail update, which is why we need the vma.  Without this, the
 * page can be taken away from us on fork, even if the child never touches
 * it, and then the user process never sees the tail register updates.
 */
int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
{
	struct vm_area_struct *vma;
	int ret;

	down_write(&current->mm->mmap_sem);

	ret = __get_user_pages(page, 1, p, &vma);

	up_write(&current->mm->mmap_sem);

	return ret;
}

void ipath_release_user_pages(struct page **p, size_t num_pages)
{
	down_write(&current->mm->mmap_sem);

	__ipath_release_user_pages(p, num_pages, 1);

	current->mm->locked_vm -= num_pages;

	up_write(&current->mm->mmap_sem);
}

struct ipath_user_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	unsigned long num_pages;
};

static void user_pages_account(struct work_struct *_work)
{
	struct ipath_user_pages_work *work =
		container_of(_work, struct ipath_user_pages_work, work);

	down_write(&work->mm->mmap_sem);
	work->mm->locked_vm -= work->num_pages;
	up_write(&work->mm->mmap_sem);
	mmput(work->mm);
	kfree(work);
}

void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
{
	struct ipath_user_pages_work *work;
	struct mm_struct *mm;

	__ipath_release_user_pages(p, num_pages, 1);

	mm = get_task_mm(current);
	if (!mm)
		goto bail;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		goto bail_mm;

	/*
	 * Defer the locked_vm accounting to a workqueue; the work item
	 * owns the mm reference and drops it in user_pages_account().
	 */
	INIT_WORK(&work->work, user_pages_account);
	work->mm = mm;
	work->num_pages = num_pages;

	schedule_work(&work->work);
	return;

bail_mm:
	mmput(mm);
bail:
	return;
}
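
/*
 * Illustrative sketch (not part of the driver): how a hypothetical caller
 * might pair ipath_get_user_pages() with ipath_release_user_pages().  The
 * function below and its names are invented for illustration only, which
 * is why it is kept inside "#if 0"; only the two ipath_* calls and
 * PAGE_MASK are real.
 */
#if 0
static int example_pin_one_page(unsigned long uaddr, struct page **pagep)
{
	int ret;

	/* the interface expects a page aligned user virtual address */
	ret = ipath_get_user_pages(uaddr & PAGE_MASK, 1, pagep);
	if (ret)
		return ret;	/* e.g. -ENOMEM if over RLIMIT_MEMLOCK */

	/* ... hand the pinned page to the hardware and do the I/O ... */

	/* unpin; the release path marks the page dirty for us */
	ipath_release_user_pages(pagep, 1);

	return 0;
}
#endif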