/* subr_uio.c — FreeBSD kern_subr.c, revision 178272 */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_subr.c 178272 2008-04-17 04:20:10Z jeff $");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the
physical page mapped at the given kernel virtual 73105197Ssam * address. Insert this physical page into the given address space at 74105197Ssam * the given virtual address, replacing the physical page, if any, 75105197Ssam * that already exists there. 76105197Ssam */ 77105197Ssamstatic int 78105197Ssamvm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr) 79105197Ssam{ 80105197Ssam vm_map_t map = mapa; 81105197Ssam vm_page_t kern_pg, user_pg; 82105197Ssam vm_object_t uobject; 83105197Ssam vm_map_entry_t entry; 84105197Ssam vm_pindex_t upindex; 85105197Ssam vm_prot_t prot; 86105197Ssam boolean_t wired; 87194062Svanhu 88194062Svanhu KASSERT((uaddr & PAGE_MASK) == 0, 89194062Svanhu ("vm_pgmoveco: uaddr is not page aligned")); 90194062Svanhu 91181627Svanhu /* 92181627Svanhu * Herein the physical page is validated and dirtied. It is 93181627Svanhu * unwired in sf_buf_mext(). 94181627Svanhu */ 95181627Svanhu kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr)); 96105197Ssam kern_pg->valid = VM_PAGE_BITS_ALL; 97105197Ssam KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1, 98105197Ssam ("vm_pgmoveco: kern_pg is not correctly wired")); 99105197Ssam 100105197Ssam if ((vm_map_lookup(&map, uaddr, 101105197Ssam VM_PROT_WRITE, &entry, &uobject, 102105197Ssam &upindex, &prot, &wired)) != KERN_SUCCESS) { 103105197Ssam return(EFAULT); 104105197Ssam } 105120585Ssam VM_OBJECT_LOCK(uobject); 106120585Ssamretry: 107105197Ssam if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) { 108120585Ssam if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco")) 109120585Ssam goto retry; 110105197Ssam vm_page_lock_queues(); 111105197Ssam pmap_remove_all(user_pg); 112105197Ssam vm_page_free(user_pg); 113105197Ssam } else { 114105197Ssam /* 115105197Ssam * Even if a physical page does not exist in the 116105197Ssam * object chain's first object, a physical page from a 117105197Ssam * backing object may be mapped read only. 
118105197Ssam */ 119105197Ssam if (uobject->backing_object != NULL) 120105197Ssam pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE); 121105197Ssam vm_page_lock_queues(); 122105197Ssam } 123105197Ssam vm_page_insert(kern_pg, uobject, upindex); 124105197Ssam vm_page_dirty(kern_pg); 125105197Ssam vm_page_unlock_queues(); 126105197Ssam VM_OBJECT_UNLOCK(uobject); 127105197Ssam vm_map_lookup_done(map, entry); 128105197Ssam return(KERN_SUCCESS); 129105197Ssam} 130105197Ssam#endif /* ZERO_COPY_SOCKETS */ 131105197Ssam 132105197Ssamint 133105197Ssamuiomove(void *cp, int n, struct uio *uio) 134105197Ssam{ 135105197Ssam struct thread *td = curthread; 136120585Ssam struct iovec *iov; 137105197Ssam u_int cnt; 138105197Ssam int error = 0; 139105197Ssam int save = 0; 140105197Ssam 141105197Ssam KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE, 142105197Ssam ("uiomove: mode")); 143105197Ssam KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 144105197Ssam ("uiomove proc")); 145105197Ssam WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 146105197Ssam "Calling uiomove()"); 147105197Ssam 148105197Ssam save = td->td_pflags & TDP_DEADLKTREAT; 149120585Ssam td->td_pflags |= TDP_DEADLKTREAT; 150105197Ssam 151105197Ssam while (n > 0 && uio->uio_resid) { 152105197Ssam iov = uio->uio_iov; 153105197Ssam cnt = iov->iov_len; 154105197Ssam if (cnt == 0) { 155105197Ssam uio->uio_iov++; 156105197Ssam uio->uio_iovcnt--; 157105197Ssam continue; 158105197Ssam } 159105197Ssam if (cnt > n) 160105197Ssam cnt = n; 161105197Ssam 162105197Ssam switch (uio->uio_segflg) { 163105197Ssam 164105197Ssam case UIO_USERSPACE: 165105197Ssam if (ticks - PCPU_GET(switchticks) >= hogticks) 166105197Ssam uio_yield(); 167105197Ssam if (uio->uio_rw == UIO_READ) 168252026Sae error = copyout(cp, iov->iov_base, cnt); 169238700Sbz else 170221129Sbz error = copyin(iov->iov_base, cp, cnt); 171221129Sbz if (error) 172221129Sbz goto out; 173221129Sbz break; 174221129Sbz 175221129Sbz case UIO_SYSSPACE: 
176221129Sbz if (uio->uio_rw == UIO_READ) 177221129Sbz bcopy(cp, iov->iov_base, cnt); 178221129Sbz else 179221129Sbz bcopy(iov->iov_base, cp, cnt); 180266800Svanhu break; 181221129Sbz case UIO_NOCOPY: 182221129Sbz break; 183221129Sbz } 184221129Sbz iov->iov_base = (char *)iov->iov_base + cnt; 185221129Sbz iov->iov_len -= cnt; 186221129Sbz uio->uio_resid -= cnt; 187221129Sbz uio->uio_offset += cnt; 188221129Sbz cp = (char *)cp + cnt; 189221129Sbz n -= cnt; 190105197Ssam } 191117056Ssamout: 192105197Ssam if (save == 0) 193105197Ssam td->td_pflags &= ~TDP_DEADLKTREAT; 194105197Ssam return (error); 195105197Ssam} 196105197Ssam 197105197Ssam/* 198105197Ssam * Wrapper for uiomove() that validates the arguments against a known-good 199105197Ssam * kernel buffer. Currently, uiomove accepts a signed (n) argument, which 200105197Ssam * is almost definitely a bad thing, so we catch that here as well. We 201194062Svanhu * return a runtime failure, but it might be desirable to generate a runtime 202194062Svanhu * assertion failure instead. 
203194062Svanhu */ 204194062Svanhuint 205194062Svanhuuiomove_frombuf(void *buf, int buflen, struct uio *uio) 206194062Svanhu{ 207241919Sglebius unsigned int offset, n; 208194062Svanhu 209194062Svanhu if (uio->uio_offset < 0 || uio->uio_resid < 0 || 210194062Svanhu (offset = uio->uio_offset) != uio->uio_offset) 211194062Svanhu return (EINVAL); 212194062Svanhu if (buflen <= 0 || offset >= buflen) 213194062Svanhu return (0); 214194062Svanhu if ((n = buflen - offset) > INT_MAX) 215194062Svanhu return (EINVAL); 216194062Svanhu return (uiomove((char *)buf + offset, n, uio)); 217194062Svanhu} 218194062Svanhu 219194062Svanhu#ifdef ZERO_COPY_SOCKETS 220194062Svanhu/* 221194062Svanhu * Experimental support for zero-copy I/O 222194062Svanhu */ 223194062Svanhustatic int 224194062Svanhuuserspaceco(void *cp, u_int cnt, struct uio *uio, int disposable) 225194062Svanhu{ 226194062Svanhu struct iovec *iov; 227194062Svanhu int error; 228194062Svanhu 229194062Svanhu iov = uio->uio_iov; 230194062Svanhu if (uio->uio_rw == UIO_READ) { 231194062Svanhu if ((so_zero_copy_receive != 0) 232194062Svanhu && ((cnt & PAGE_MASK) == 0) 233194062Svanhu && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) 234194062Svanhu && ((uio->uio_offset & PAGE_MASK) == 0) 235194062Svanhu && ((((intptr_t) cp) & PAGE_MASK) == 0) 236194062Svanhu && (disposable != 0)) { 237194062Svanhu /* SOCKET: use page-trading */ 238194062Svanhu /* 239194062Svanhu * We only want to call vm_pgmoveco() on 240194062Svanhu * disposeable pages, since it gives the 241241919Sglebius * kernel page to the userland process. 242194062Svanhu */ 243194062Svanhu error = vm_pgmoveco(&curproc->p_vmspace->vm_map, 244194062Svanhu (vm_offset_t)cp, (vm_offset_t)iov->iov_base); 245194062Svanhu 246194062Svanhu /* 247194062Svanhu * If we get an error back, attempt 248194062Svanhu * to use copyout() instead. The 249105197Ssam * disposable page should be freed 250105197Ssam * automatically if we weren't able to move 251105197Ssam * it into userland. 
252105197Ssam */ 253105197Ssam if (error != 0) 254105197Ssam error = copyout(cp, iov->iov_base, cnt); 255105197Ssam } else { 256105197Ssam error = copyout(cp, iov->iov_base, cnt); 257105197Ssam } 258105197Ssam } else { 259105197Ssam error = copyin(iov->iov_base, cp, cnt); 260105197Ssam } 261105197Ssam return (error); 262105197Ssam} 263105197Ssam 264105197Ssamint 265105197Ssamuiomoveco(void *cp, int n, struct uio *uio, int disposable) 266105197Ssam{ 267105197Ssam struct iovec *iov; 268105197Ssam u_int cnt; 269105197Ssam int error; 270105197Ssam 271105197Ssam KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE, 272105197Ssam ("uiomoveco: mode")); 273105197Ssam KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, 274105197Ssam ("uiomoveco proc")); 275252028Sae 276252028Sae while (n > 0 && uio->uio_resid) { 277252028Sae iov = uio->uio_iov; 278252028Sae cnt = iov->iov_len; 279252028Sae if (cnt == 0) { 280252028Sae uio->uio_iov++; 281252028Sae uio->uio_iovcnt--; 282252028Sae continue; 283105197Ssam } 284105197Ssam if (cnt > n) 285120585Ssam cnt = n; 286120585Ssam 287120585Ssam switch (uio->uio_segflg) { 288120585Ssam 289105197Ssam case UIO_USERSPACE: 290105197Ssam if (ticks - PCPU_GET(switchticks) >= hogticks) 291105197Ssam uio_yield(); 292105197Ssam 293105197Ssam error = userspaceco(cp, cnt, uio, disposable); 294105197Ssam 295105197Ssam if (error) 296105197Ssam return (error); 297105197Ssam break; 298105197Ssam 299105197Ssam case UIO_SYSSPACE: 300105197Ssam if (uio->uio_rw == UIO_READ) 301105197Ssam bcopy(cp, iov->iov_base, cnt); 302105197Ssam else 303105197Ssam bcopy(iov->iov_base, cp, cnt); 304105197Ssam break; 305105197Ssam case UIO_NOCOPY: 306105197Ssam break; 307105197Ssam } 308105197Ssam iov->iov_base = (char *)iov->iov_base + cnt; 309105197Ssam iov->iov_len -= cnt; 310105197Ssam uio->uio_resid -= cnt; 311105197Ssam uio->uio_offset += cnt; 312105197Ssam cp = (char *)cp + cnt; 313105197Ssam n -= cnt; 314105197Ssam } 315105197Ssam return 
(0); 316105197Ssam} 317105197Ssam#endif /* ZERO_COPY_SOCKETS */ 318105197Ssam 319105197Ssam/* 320105197Ssam * Give next character to user as result of read. 321105197Ssam */ 322105197Ssamint 323105197Ssamureadc(int c, struct uio *uio) 324105197Ssam{ 325105197Ssam struct iovec *iov; 326105197Ssam char *iov_base; 327105197Ssam 328105197Ssamagain: 329105197Ssam if (uio->uio_iovcnt == 0 || uio->uio_resid == 0) 330105197Ssam panic("ureadc"); 331105197Ssam iov = uio->uio_iov; 332105197Ssam if (iov->iov_len == 0) { 333105197Ssam uio->uio_iovcnt--; 334105197Ssam uio->uio_iov++; 335105197Ssam goto again; 336105197Ssam } 337105197Ssam switch (uio->uio_segflg) { 338105197Ssam 339105197Ssam case UIO_USERSPACE: 340105197Ssam if (subyte(iov->iov_base, c) < 0) 341105197Ssam return (EFAULT); 342105197Ssam break; 343105197Ssam 344105197Ssam case UIO_SYSSPACE: 345105197Ssam iov_base = iov->iov_base; 346105197Ssam *iov_base = c; 347105197Ssam iov->iov_base = iov_base; 348105197Ssam break; 349105197Ssam 350105197Ssam case UIO_NOCOPY: 351105197Ssam break; 352105197Ssam } 353105197Ssam iov->iov_base = (char *)iov->iov_base + 1; 354105197Ssam iov->iov_len--; 355105197Ssam uio->uio_resid--; 356105197Ssam uio->uio_offset++; 357105197Ssam return (0); 358105197Ssam} 359105197Ssam 360105197Ssam/* 361252026Sae * General routine to allocate a hash table with control of memory flags. 362105197Ssam */ 363105197Ssamvoid * 364105197Ssamhashinit_flags(int elements, struct malloc_type *type, u_long *hashmask, 365177175Sbz int flags) 366120585Ssam{ 367120585Ssam long hashsize; 368105197Ssam LIST_HEAD(generic, generic) *hashtbl; 369120585Ssam int i; 370105197Ssam 371177175Sbz if (elements <= 0) 372177175Sbz panic("hashinit: bad elements"); 373177175Sbz 374177175Sbz /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. 
*/ 375177175Sbz KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT), 376177175Sbz ("Bad flags (0x%x) passed to hashinit_flags", flags)); 377105197Ssam 378120585Ssam for (hashsize = 1; hashsize <= elements; hashsize <<= 1) 379105197Ssam continue; 380105197Ssam hashsize >>= 1; 381105197Ssam 382105197Ssam if (flags & HASH_NOWAIT) 383105197Ssam hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), 384105197Ssam type, M_NOWAIT); 385181803Sbz else 386181803Sbz hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), 387181803Sbz type, M_WAITOK); 388120585Ssam 389120585Ssam if (hashtbl != NULL) { 390252028Sae for (i = 0; i < hashsize; i++) 391105197Ssam LIST_INIT(&hashtbl[i]); 392105197Ssam *hashmask = hashsize - 1; 393105197Ssam } 394105197Ssam return (hashtbl); 395105197Ssam} 396105197Ssam 397105197Ssam/* 398105197Ssam * Allocate and initialize a hash table with default flag: may sleep. 399105197Ssam */ 400120585Ssamvoid * 401252028Saehashinit(int elements, struct malloc_type *type, u_long *hashmask) 402105197Ssam{ 403105197Ssam 404105197Ssam return (hashinit_flags(elements, type, hashmask, HASH_WAITOK)); 405105197Ssam} 406105197Ssam 407120585Ssamvoid 408120585Ssamhashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) 409105197Ssam{ 410105197Ssam LIST_HEAD(generic, generic) *hashtbl, *hp; 411105197Ssam 412105197Ssam hashtbl = vhashtbl; 413105197Ssam for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++) 414105197Ssam if (!LIST_EMPTY(hp)) 415105197Ssam panic("hashdestroy: hash not empty"); 416105197Ssam free(hashtbl, type); 417105197Ssam} 418105197Ssam 419105197Ssamstatic int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039, 420105197Ssam 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653, 421105197Ssam 7159, 7673, 8191, 12281, 16381, 24571, 32749 }; 422105197Ssam#define NPRIMES (sizeof(primes) / sizeof(primes[0])) 423105197Ssam 424105197Ssam/* 425105197Ssam * General routine to allocate a prime number sized hash table. 
426105197Ssam */ 427119643Ssamvoid * 428105197Ssamphashinit(int elements, struct malloc_type *type, u_long *nentries) 429120585Ssam{ 430120585Ssam long hashsize; 431105197Ssam LIST_HEAD(generic, generic) *hashtbl; 432120585Ssam int i; 433105197Ssam 434105197Ssam if (elements <= 0) 435177175Sbz panic("phashinit: bad elements"); 436177175Sbz for (i = 1, hashsize = primes[1]; hashsize <= elements;) { 437177175Sbz i++; 438177175Sbz if (i == NPRIMES) 439177175Sbz break; 440105197Ssam hashsize = primes[i]; 441105197Ssam } 442159965Sthompsa hashsize = primes[i - 1]; 443159965Sthompsa hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK); 444181627Svanhu for (i = 0; i < hashsize; i++) 445181627Svanhu LIST_INIT(&hashtbl[i]); 446181627Svanhu *nentries = hashsize; 447174054Sbz return (hashtbl); 448174054Sbz} 449159965Sthompsa 450174054Sbzvoid 451159965Sthompsauio_yield(void) 452159965Sthompsa{ 453159965Sthompsa struct thread *td; 454105197Ssam 455105197Ssam td = curthread; 456105197Ssam DROP_GIANT(); 457105197Ssam thread_lock(td); 458105197Ssam sched_prio(td, td->td_user_pri); 459105197Ssam mi_switch(SW_INVOL | SWT_RELINQUISH, NULL); 460105197Ssam thread_unlock(td); 461105197Ssam PICKUP_GIANT(); 462105197Ssam} 463105197Ssam 464105197Ssamint 465105197Ssamcopyinfrom(const void * __restrict src, void * __restrict dst, size_t len, 466105197Ssam int seg) 467105197Ssam{ 468105197Ssam int error = 0; 469181803Sbz 470105197Ssam switch (seg) { 471105197Ssam case UIO_USERSPACE: 472181803Sbz error = copyin(src, dst, len); 473105197Ssam break; 474105197Ssam case UIO_SYSSPACE: 475105197Ssam bcopy(src, dst, len); 476105197Ssam break; 477105197Ssam default: 478105197Ssam panic("copyinfrom: bad seg %d\n", seg); 479105197Ssam } 480105197Ssam return (error); 481105197Ssam} 482105197Ssam 483105197Ssamint 484105197Ssamcopyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len, 485105197Ssam size_t * __restrict copied, int seg) 486105197Ssam{ 487105197Ssam int 
error = 0; 488105197Ssam 489105197Ssam switch (seg) { 490105197Ssam case UIO_USERSPACE: 491105197Ssam error = copyinstr(src, dst, len, copied); 492105197Ssam break; 493105197Ssam case UIO_SYSSPACE: 494105197Ssam error = copystr(src, dst, len, copied); 495105197Ssam break; 496105197Ssam default: 497105197Ssam panic("copyinstrfrom: bad seg %d\n", seg); 498105197Ssam } 499105197Ssam return (error); 500105197Ssam} 501105197Ssam 502105197Ssamint 503105197Ssamcopyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error) 504105197Ssam{ 505105197Ssam u_int iovlen; 506105197Ssam 507105197Ssam *iov = NULL; 508105197Ssam if (iovcnt > UIO_MAXIOV) 509120585Ssam return (error); 510120585Ssam iovlen = iovcnt * sizeof (struct iovec); 511105197Ssam *iov = malloc(iovlen, M_IOV, M_WAITOK); 512105197Ssam error = copyin(iovp, *iov, iovlen); 513105197Ssam if (error) { 514124765Ssam free(*iov, M_IOV); 515124765Ssam *iov = NULL; 516105197Ssam } 517124765Ssam return (error); 518124765Ssam} 519105197Ssam 520105197Ssamint 521105197Ssamcopyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop) 522105197Ssam{ 523105197Ssam struct iovec *iov; 524105197Ssam struct uio *uio; 525105197Ssam u_int iovlen; 526105197Ssam int error, i; 527105197Ssam 528105197Ssam *uiop = NULL; 529105197Ssam if (iovcnt > UIO_MAXIOV) 530105197Ssam return (EINVAL); 531105197Ssam iovlen = iovcnt * sizeof (struct iovec); 532105197Ssam uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK); 533105197Ssam iov = (struct iovec *)(uio + 1); 534105197Ssam error = copyin(iovp, iov, iovlen); 535105197Ssam if (error) { 536105197Ssam free(uio, M_IOV); 537105197Ssam return (error); 538105197Ssam } 539105197Ssam uio->uio_iov = iov; 540105197Ssam uio->uio_iovcnt = iovcnt; 541105197Ssam uio->uio_segflg = UIO_USERSPACE; 542105197Ssam uio->uio_offset = -1; 543159965Sthompsa uio->uio_resid = 0; 544159965Sthompsa for (i = 0; i < iovcnt; i++) { 545266800Svanhu if (iov->iov_len > INT_MAX - uio->uio_resid) { 546174054Sbz free(uio, 
M_IOV); 547174054Sbz return (EINVAL); 548174054Sbz } 549159965Sthompsa uio->uio_resid += iov->iov_len; 550159965Sthompsa iov++; 551105197Ssam } 552105197Ssam *uiop = uio; 553105197Ssam return (0); 554105197Ssam} 555105197Ssam 556105197Ssamstruct uio * 557105197Ssamcloneuio(struct uio *uiop) 558105197Ssam{ 559105197Ssam struct uio *uio; 560105197Ssam int iovlen; 561105197Ssam 562266800Svanhu iovlen = uiop->uio_iovcnt * sizeof (struct iovec); 563266800Svanhu uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK); 564266800Svanhu *uio = *uiop; 565266800Svanhu uio->uio_iov = (struct iovec *)(uio + 1); 566266800Svanhu bcopy(uiop->uio_iov, uio->uio_iov, iovlen); 567266800Svanhu return (uio); 568266800Svanhu} 569266800Svanhu