/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_subr.c 98849 2002-06-26 03:37:47Z ken $
 */

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
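
/*
 * Illustrative note:  userland can read this limit either with
 * sysconf(_SC_IOV_MAX) or with the sysctl(8) utility, e.g.
 * "sysctl kern.iov_max".
 */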

#ifdef ZERO_COPY_SOCKETS
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <sys/mbuf.h>
#include <machine/cpu.h>

/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

static int vm_pgmoveco(vm_map_t mapa, vm_object_t srcobj, vm_offset_t kaddr,
		vm_offset_t uaddr);
static int userspaceco(caddr_t cp, u_int cnt, struct uio *uio,
		struct vm_object *obj, int disposable);

/*
 * Hand a kernel page directly to the user address space:  look up the
 * user page backing uaddr, free it if present, and rename the kernel
 * page backing kaddr into the user object in its place.
 */
static int
vm_pgmoveco(mapa, srcobj, kaddr, uaddr)
	vm_map_t mapa;
	vm_object_t srcobj;
	vm_offset_t kaddr, uaddr;
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex, kpindex;
	vm_prot_t prot;
	boolean_t wired;

	/*
	 * First lookup the kernel page.
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));

	if ((vm_map_lookup(&map, uaddr,
	    VM_PROT_READ, &entry, &uobject,
	    &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		vm_page_sleep_busy(user_pg, 1, "vm_pgmoveco");
		pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_busy(user_pg);
		vm_page_free(user_pg);
	}

	if (kern_pg->busy || ((kern_pg->queue - kern_pg->pc) == PQ_FREE) ||
	    (kern_pg->hold_count != 0) || (kern_pg->flags & PG_BUSY)) {
		printf("vm_pgmoveco: pindex(%lu), busy(%d), PG_BUSY(%d), "
		    "hold(%d) paddr(0x%lx)\n", (u_long)kern_pg->pindex,
		    kern_pg->busy, (kern_pg->flags & PG_BUSY) ? 1 : 0,
		    kern_pg->hold_count, (u_long)kern_pg->phys_addr);
		if ((kern_pg->queue - kern_pg->pc) == PQ_FREE)
			panic("vm_pgmoveco: renaming free page");
		else
			panic("vm_pgmoveco: renaming busy page");
	}
	kpindex = kern_pg->pindex;
	vm_page_busy(kern_pg);
	vm_page_rename(kern_pg, uobject, upindex);
	vm_page_flag_clear(kern_pg, PG_BUSY);
	kern_pg->valid = VM_PAGE_BITS_ALL;

	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

/*
 * Copy n bytes between the kernel buffer cp and the user or kernel
 * addresses described by uio, advancing the uio as the copy proceeds.
 */
int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	struct thread *td = curthread;
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	if (td) {
		mtx_lock_spin(&sched_lock);
		save = td->td_flags & TDF_DEADLKTREAT;
		td->td_flags |= TDF_DEADLKTREAT;
		mtx_unlock_spin(&sched_lock);
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	if (td != curthread) printf("uiomove: IT CHANGED!");
	td = curthread;	/* Might things have changed in copyin/copyout? */
	if (td) {
		mtx_lock_spin(&sched_lock);
		td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
		mtx_unlock_spin(&sched_lock);
	}
	return (error);
}
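
/*
 * Illustrative sketch of a typical caller (the "foo" driver, its softc
 * layout and foo_read() are hypothetical, not part of this file):  a
 * character device read routine hands data back to the caller by
 * draining a driver buffer through uiomove(), which picks copyout()
 * or bcopy() as appropriate for the destination segment.
 *
 *	static int
 *	foo_read(dev_t dev, struct uio *uio, int ioflag)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		int amt;
 *
 *		amt = MIN(uio->uio_resid, sc->buf_len);
 *		return (uiomove(sc->buf, amt, uio));
 *	}
 */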

#if defined(ENABLE_VFS_IOOPT) || defined(ZERO_COPY_SOCKETS)
/*
 * Experimental support for zero-copy I/O
 */
/*
 * Move cnt bytes between cp and the current user iovec, using page
 * flipping or page moving when the alignment and configuration
 * prerequisites are met, and plain copyin()/copyout() otherwise.
 */
static int
userspaceco(cp, cnt, uio, obj, disposable)
	caddr_t cp;
	u_int cnt;
	struct uio *uio;
	struct vm_object *obj;
	int disposable;
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;

#ifdef ZERO_COPY_SOCKETS

	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && (obj != NULL)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (obj->type == OBJT_DEFAULT)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    obj, (vm_offset_t)cp,
			    (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
#ifdef ENABLE_VFS_IOOPT
		} else if ((vfs_ioopt != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, NULL);
#endif /* ENABLE_VFS_IOOPT */
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
#else /* ZERO_COPY_SOCKETS */
	if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
		if ((vfs_ioopt != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, NULL);
		} else
#endif /* ENABLE_VFS_IOOPT */
		{
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
#endif /* ZERO_COPY_SOCKETS */

	return (error);
}
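
/*
 * Illustrative note on the fast paths above:  every zero-copy branch
 * requires the transfer length, the user buffer address, the uio
 * offset and the kernel buffer address to be page aligned, so a
 * userland consumer must issue page-sized reads into page-aligned
 * memory for page flipping to trigger.  Hypothetical userland sketch:
 *
 *	size_t pg = (size_t)getpagesize();
 *	char *buf = valloc(pg);			page-aligned buffer
 *	ssize_t n = read(sock, buf, pg);	page-sized request
 */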

int
uiomoveco(cp, n, uio, obj, disposable)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int disposable;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, obj, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}
#endif /* ENABLE_VFS_IOOPT || ZERO_COPY_SOCKETS */

#ifdef ENABLE_VFS_IOOPT

/*
 * Experimental support for zero-copy I/O:  move whole pages from the
 * backing object directly into the user address space.  This path is
 * only active when the experimental vfs_ioopt knob is raised to 2;
 * otherwise the routine returns immediately and the caller falls back
 * to ordinary copying.
 */
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
		    ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
		    ((uio->uio_offset & PAGE_MASK) == 0)) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}
#endif /* ENABLE_VFS_IOOPT */

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	/* Round down to the largest power of two <= elements. */
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	/* Pick the largest prime in the table <= elements. */
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}

/*
 * Yield the processor:  put the current thread back on the run queue
 * and switch to whatever is runnable next.
 */
void
uio_yield()
{
	struct thread *td;

	td = curthread;
	mtx_lock_spin(&sched_lock);
	DROP_GIANT();
	td->td_priority = td->td_ksegrp->kg_user_pri; /* XXXKSE */
	setrunqueue(td);
	td->td_proc->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}

int
copyinfrom(const void *src, void *dst, size_t len, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}

int
copyinstrfrom(const void *src, void *dst, size_t len, size_t *copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}
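
/*
 * Illustrative sketch of typical hashinit() usage (the "foo" names are
 * hypothetical, not part of this file):  allocate a power-of-two table
 * once, then select buckets by masking the key with the returned mask.
 *
 *	static LIST_HEAD(foohead, foo) *foo_hashtbl;
 *	static u_long foo_hashmask;
 *
 *	foo_hashtbl = hashinit(128, M_TEMP, &foo_hashmask);
 *	struct foohead *head = &foo_hashtbl[key & foo_hashmask];
 *	LIST_INSERT_HEAD(head, fp, foo_link);
 */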