subr_hash.c revision 196454
1139804Simp/*-
21541Srgrimes * Copyright (c) 1982, 1986, 1991, 1993
31541Srgrimes *	The Regents of the University of California.  All rights reserved.
41541Srgrimes * (c) UNIX System Laboratories, Inc.
51541Srgrimes * All or some portions of this file are derived from material licensed
61541Srgrimes * to the University of California by American Telephone and Telegraph
71541Srgrimes * Co. or Unix System Laboratories, Inc. and are reproduced herein with
81541Srgrimes * the permission of UNIX System Laboratories, Inc.
91541Srgrimes *
101541Srgrimes * Redistribution and use in source and binary forms, with or without
111541Srgrimes * modification, are permitted provided that the following conditions
121541Srgrimes * are met:
131541Srgrimes * 1. Redistributions of source code must retain the above copyright
141541Srgrimes *    notice, this list of conditions and the following disclaimer.
151541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright
161541Srgrimes *    notice, this list of conditions and the following disclaimer in the
171541Srgrimes *    documentation and/or other materials provided with the distribution.
181541Srgrimes * 4. Neither the name of the University nor the names of its contributors
191541Srgrimes *    may be used to endorse or promote products derived from this software
201541Srgrimes *    without specific prior written permission.
211541Srgrimes *
221541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
231541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
241541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
251541Srgrimes * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
261541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
271541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
281541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
291541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
301541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
311541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
321541Srgrimes * SUCH DAMAGE.
331541Srgrimes *
341541Srgrimes *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
351541Srgrimes */
361541Srgrimes
37116182Sobrien#include <sys/cdefs.h>
38116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_subr.c 196454 2009-08-23 09:55:06Z rpaulo $");
39116182Sobrien
4098849Sken#include "opt_zero.h"
4198849Sken
421541Srgrimes#include <sys/param.h>
431541Srgrimes#include <sys/systm.h>
4444218Sbde#include <sys/kernel.h>
4565557Sjasone#include <sys/ktr.h>
46120665Snectar#include <sys/limits.h>
4776166Smarkm#include <sys/lock.h>
4876166Smarkm#include <sys/mutex.h>
491541Srgrimes#include <sys/proc.h>
501541Srgrimes#include <sys/malloc.h>
5143529Sbde#include <sys/resourcevar.h>
52104964Sjeff#include <sys/sched.h>
5378431Swollman#include <sys/sysctl.h>
5432702Sdyson#include <sys/vnode.h>
551541Srgrimes
5631853Sdyson#include <vm/vm.h>
5731853Sdyson#include <vm/vm_page.h>
5831853Sdyson#include <vm/vm_map.h>
5999848Sken#ifdef ZERO_COPY_SOCKETS
6099848Sken#include <vm/vm_param.h>
6199848Sken#include <vm/vm_object.h>
6299848Sken#endif
6331853Sdyson
/* Export the system-wide iovec limit read-only via sysctl(3). */
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
6678431Swollman
6798849Sken#ifdef ZERO_COPY_SOCKETS
6898849Sken/* Declared in uipc_socket.c */
6998849Skenextern int so_zero_copy_receive;
7098849Sken
/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 *
 * Returns KERN_SUCCESS (0) on success or EFAULT if the user address
 * cannot be looked up for write.  NOTE(review): the return value mixes
 * the KERN_* and errno spaces; callers only test for non-zero, so this
 * works, but beware when adding new failure paths.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	/* Translate the user VA to its object/pindex; may block. */
	if ((vm_map_lookup(&map, uaddr,
			   VM_PROT_WRITE, &entry, &uobject,
			   &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		/* The object lock may be dropped while we sleep; redo lookup. */
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock_queues();
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	/* Donate the (dirtied) kernel page to the user object. */
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
13098849Sken#endif /* ZERO_COPY_SOCKETS */
13198849Sken
/*
 * Transfer up to n bytes between the kernel buffer cp and the regions
 * described by uio.  uio->uio_rw selects the direction: UIO_READ copies
 * kernel -> uio destination, UIO_WRITE copies uio source -> kernel.
 * The iovec cursor, uio_resid and uio_offset are advanced as data
 * moves, so the uio reflects any partial progress on error.
 *
 * Returns 0 on success or an errno from copyin()/copyout().
 */
int
uiomove(void *cp, int n, struct uio *uio)
{
	struct thread *td = curthread;
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling uiomove()");

	/*
	 * Request deadlock treatment for the duration of the copy, but
	 * remember whether it was already set so we only clear what we
	 * set ourselves on the way out.
	 */
	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* This iovec entry is exhausted; advance to the next. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			/* Yield if this thread has been hogging the CPU. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			/* Advance the cursor without moving data. */
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	/* Only clear the flag if we were the ones who set it. */
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
1961541Srgrimes
197120665Snectar/*
198120665Snectar * Wrapper for uiomove() that validates the arguments against a known-good
199120665Snectar * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
200120665Snectar * is almost definitely a bad thing, so we catch that here as well.  We
201120665Snectar * return a runtime failure, but it might be desirable to generate a runtime
202120665Snectar * assertion failure instead.
203120665Snectar */
204120665Snectarint
205120665Snectaruiomove_frombuf(void *buf, int buflen, struct uio *uio)
206120665Snectar{
207120665Snectar	unsigned int offset, n;
208120665Snectar
209120665Snectar	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
210120665Snectar	    (offset = uio->uio_offset) != uio->uio_offset)
211120665Snectar		return (EINVAL);
212120665Snectar	if (buflen <= 0 || offset >= buflen)
213120665Snectar		return (0);
214120665Snectar	if ((n = buflen - offset) > INT_MAX)
215120665Snectar		return (EINVAL);
216120665Snectar	return (uiomove((char *)buf + offset, n, uio));
217120665Snectar}
218120665Snectar
219111937Salc#ifdef ZERO_COPY_SOCKETS
22096080Salc/*
22196080Salc * Experimental support for zero-copy I/O
22296080Salc */
/*
 * Copy cnt bytes between the kernel buffer cp and the current iovec of
 * uio, for a user-space uio.  On reads, when zero-copy receive is
 * enabled and the buffer, destination, offset and count are all page
 * aligned and the page is disposable, the kernel page is traded into
 * the user address space via vm_pgmoveco() instead of being copied;
 * on any page-trade failure we fall back to copyout().  Writes always
 * use copyin().  Returns 0 or an errno.
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error =	vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}
26398849Sken
/*
 * Zero-copy-capable variant of uiomove().  Identical contract to
 * uiomove(), except that user-space transfers go through userspaceco(),
 * which may page-trade disposable, page-aligned buffers instead of
 * copying them.  Returns 0 or an errno; the uio cursor reflects any
 * partial progress on error.
 */
int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Exhausted iovec entry; advance to the next. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			/* Yield if this thread has been hogging the CPU. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			/* Advance the cursor without moving data. */
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
317111937Salc#endif /* ZERO_COPY_SOCKETS */
31831853Sdyson
31996080Salc/*
3201541Srgrimes * Give next character to user as result of read.
3211541Srgrimes */
3221549Srgrimesint
323111737Sdesureadc(int c, struct uio *uio)
3241541Srgrimes{
325111737Sdes	struct iovec *iov;
326111737Sdes	char *iov_base;
3271541Srgrimes
328182399Sed	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
329182399Sed	    "Calling ureadc()");
330182399Sed
3311541Srgrimesagain:
3321541Srgrimes	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
3331541Srgrimes		panic("ureadc");
3341541Srgrimes	iov = uio->uio_iov;
3351541Srgrimes	if (iov->iov_len == 0) {
3361541Srgrimes		uio->uio_iovcnt--;
3371541Srgrimes		uio->uio_iov++;
3381541Srgrimes		goto again;
3391541Srgrimes	}
3401541Srgrimes	switch (uio->uio_segflg) {
3411541Srgrimes
3421541Srgrimes	case UIO_USERSPACE:
3431541Srgrimes		if (subyte(iov->iov_base, c) < 0)
3441541Srgrimes			return (EFAULT);
3451541Srgrimes		break;
3461541Srgrimes
3471541Srgrimes	case UIO_SYSSPACE:
348104908Smike		iov_base = iov->iov_base;
349104908Smike		*iov_base = c;
350104908Smike		iov->iov_base = iov_base;
3511541Srgrimes		break;
3521541Srgrimes
3538177Sdg	case UIO_NOCOPY:
3548177Sdg		break;
3551541Srgrimes	}
356104908Smike	iov->iov_base = (char *)iov->iov_base + 1;
3571541Srgrimes	iov->iov_len--;
3581541Srgrimes	uio->uio_resid--;
3591541Srgrimes	uio->uio_offset++;
3601541Srgrimes	return (0);
3611541Srgrimes}
3621541Srgrimes
3631541Srgrimes/*
364166022Srrs * General routine to allocate a hash table with control of memory flags.
3651541Srgrimes */
3661541Srgrimesvoid *
367166022Srrshashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
368166022Srrs    int flags)
3691541Srgrimes{
3701541Srgrimes	long hashsize;
37160938Sjake	LIST_HEAD(generic, generic) *hashtbl;
3721541Srgrimes	int i;
3731541Srgrimes
3741541Srgrimes	if (elements <= 0)
3758364Sdg		panic("hashinit: bad elements");
376166022Srrs
377166022Srrs	/* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
378166022Srrs	KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
379166042Srrs	    ("Bad flags (0x%x) passed to hashinit_flags", flags));
380166022Srrs
3811541Srgrimes	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
3821541Srgrimes		continue;
3831541Srgrimes	hashsize >>= 1;
384166022Srrs
385166022Srrs	if (flags & HASH_NOWAIT)
386166022Srrs		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
387166022Srrs		    type, M_NOWAIT);
388166022Srrs	else
389166022Srrs		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
390166022Srrs		    type, M_WAITOK);
391166022Srrs
392166022Srrs	if (hashtbl != NULL) {
393166022Srrs		for (i = 0; i < hashsize; i++)
394166022Srrs			LIST_INIT(&hashtbl[i]);
395166022Srrs		*hashmask = hashsize - 1;
396166022Srrs	}
3971541Srgrimes	return (hashtbl);
3981541Srgrimes}
3997611Sdg
400166022Srrs/*
401166022Srrs * Allocate and initialize a hash table with default flag: may sleep.
402166022Srrs */
403166022Srrsvoid *
404166022Srrshashinit(int elements, struct malloc_type *type, u_long *hashmask)
405166022Srrs{
406166022Srrs
407166022Srrs	return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
408166022Srrs}
409166022Srrs
41099098Siedowsevoid
411111737Sdeshashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
41299098Siedowse{
41399098Siedowse	LIST_HEAD(generic, generic) *hashtbl, *hp;
41499098Siedowse
41599098Siedowse	hashtbl = vhashtbl;
41699098Siedowse	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
41799098Siedowse		if (!LIST_EMPTY(hp))
41899098Siedowse			panic("hashdestroy: hash not empty");
41999098Siedowse	free(hashtbl, type);
42099098Siedowse}
42199098Siedowse
422196454Srpaulostatic const int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531,
423196454Srpaulo			2039, 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143,
424196454Srpaulo			6653, 7159, 7673, 8191, 12281, 16381, 24571, 32749 };
42526205Salex#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
4267611Sdg
4277611Sdg/*
4287611Sdg * General routine to allocate a prime number sized hash table.
4297611Sdg */
4307611Sdgvoid *
431111737Sdesphashinit(int elements, struct malloc_type *type, u_long *nentries)
4327611Sdg{
4337611Sdg	long hashsize;
43460938Sjake	LIST_HEAD(generic, generic) *hashtbl;
4357611Sdg	int i;
4367611Sdg
4377611Sdg	if (elements <= 0)
4388364Sdg		panic("phashinit: bad elements");
4397611Sdg	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
4407611Sdg		i++;
4417611Sdg		if (i == NPRIMES)
4427611Sdg			break;
4437611Sdg		hashsize = primes[i];
4447611Sdg	}
4457611Sdg	hashsize = primes[i - 1];
446111119Simp	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
4477611Sdg	for (i = 0; i < hashsize; i++)
4487611Sdg		LIST_INIT(&hashtbl[i]);
4497611Sdg	*nentries = hashsize;
4507611Sdg	return (hashtbl);
4517611Sdg}
45243529Sbde
/*
 * Voluntarily relinquish the CPU during a long uiomove: drop Giant,
 * reset the thread's priority to its user priority and context-switch
 * away, then re-acquire Giant on return.  The thread lock must be held
 * across sched_prio() and mi_switch().
 */
void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	sched_prio(td, td->td_user_pri);
	mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}
46672537Sjlemon
46772537Sjlemonint
468123852Salfredcopyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
469123852Salfred    int seg)
47072537Sjlemon{
47172537Sjlemon	int error = 0;
47272537Sjlemon
47372537Sjlemon	switch (seg) {
47472537Sjlemon	case UIO_USERSPACE:
47572537Sjlemon		error = copyin(src, dst, len);
47672537Sjlemon		break;
47772537Sjlemon	case UIO_SYSSPACE:
47872537Sjlemon		bcopy(src, dst, len);
47972537Sjlemon		break;
48072537Sjlemon	default:
48172537Sjlemon		panic("copyinfrom: bad seg %d\n", seg);
48272537Sjlemon	}
48372537Sjlemon	return (error);
48472537Sjlemon}
48572537Sjlemon
48672537Sjlemonint
487123852Salfredcopyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
488123852Salfred    size_t * __restrict copied, int seg)
48972537Sjlemon{
49072537Sjlemon	int error = 0;
49172537Sjlemon
49272537Sjlemon	switch (seg) {
49372537Sjlemon	case UIO_USERSPACE:
49472537Sjlemon		error = copyinstr(src, dst, len, copied);
49572537Sjlemon		break;
49672537Sjlemon	case UIO_SYSSPACE:
49772537Sjlemon		error = copystr(src, dst, len, copied);
49872537Sjlemon		break;
49972537Sjlemon	default:
50072537Sjlemon		panic("copyinstrfrom: bad seg %d\n", seg);
50172537Sjlemon	}
50272537Sjlemon	return (error);
50372537Sjlemon}
504125296Ssilby
505125296Ssilbyint
506131897Sphkcopyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
507125296Ssilby{
508131897Sphk	u_int iovlen;
509131897Sphk
510131897Sphk	*iov = NULL;
511131897Sphk	if (iovcnt > UIO_MAXIOV)
512131897Sphk		return (error);
513131897Sphk	iovlen = iovcnt * sizeof (struct iovec);
514131897Sphk	*iov = malloc(iovlen, M_IOV, M_WAITOK);
515131897Sphk	error = copyin(iovp, *iov, iovlen);
516131897Sphk	if (error) {
517131897Sphk		free(*iov, M_IOV);
518131897Sphk		*iov = NULL;
519131897Sphk	}
520131897Sphk	return (error);
521131897Sphk}
522131897Sphk
523131897Sphkint
524131897Sphkcopyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
525131897Sphk{
526125420Ssilby	struct iovec *iov;
527131897Sphk	struct uio *uio;
528125296Ssilby	u_int iovlen;
529125420Ssilby	int error, i;
530125296Ssilby
531131897Sphk	*uiop = NULL;
532131897Sphk	if (iovcnt > UIO_MAXIOV)
533131897Sphk		return (EINVAL);
534125420Ssilby	iovlen = iovcnt * sizeof (struct iovec);
535131897Sphk	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
536131897Sphk	iov = (struct iovec *)(uio + 1);
537131897Sphk	error = copyin(iovp, iov, iovlen);
538131897Sphk	if (error) {
539131897Sphk		free(uio, M_IOV);
540131897Sphk		return (error);
541125420Ssilby	}
542125420Ssilby	uio->uio_iov = iov;
543125420Ssilby	uio->uio_iovcnt = iovcnt;
544125420Ssilby	uio->uio_segflg = UIO_USERSPACE;
545125420Ssilby	uio->uio_offset = -1;
546125420Ssilby	uio->uio_resid = 0;
547125420Ssilby	for (i = 0; i < iovcnt; i++) {
548125420Ssilby		if (iov->iov_len > INT_MAX - uio->uio_resid) {
549131897Sphk			free(uio, M_IOV);
550131897Sphk			return (EINVAL);
551125420Ssilby		}
552125420Ssilby		uio->uio_resid += iov->iov_len;
553125420Ssilby		iov++;
554125420Ssilby	}
555131897Sphk	*uiop = uio;
556131897Sphk	return (0);
557131897Sphk}
558125296Ssilby
559131897Sphkstruct uio *
560131897Sphkcloneuio(struct uio *uiop)
561131897Sphk{
562131897Sphk	struct uio *uio;
563131897Sphk	int iovlen;
564125296Ssilby
565131897Sphk	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
566131897Sphk	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
567131897Sphk	*uio = *uiop;
568131897Sphk	uio->uio_iov = (struct iovec *)(uio + 1);
569131897Sphk	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
570131897Sphk	return (uio);
571125296Ssilby}
572