subr_uio.c revision 48677
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $Id: kern_subr.c,v 1.28 1999/03/12 03:09:29 julian Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

static void	uio_yield __P((void));

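/*
 * uiomove() transfers up to n bytes between the buffer cp and the space
 * described by uio (user or kernel, per uio_segflg).  UIO_READ copies from
 * cp into the uio, UIO_WRITE copies from the uio into cp; the iovec array,
 * uio_resid and uio_offset are advanced as data is moved, and the CPU is
 * yielded periodically for large transfers.  Returns 0 or an errno from
 * copyin()/copyout().
 */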
int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - switchticks >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			/* Stop on a copy fault; error is returned below. */
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}
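
/*
 * Example (not part of the original source): a minimal sketch of how a
 * hypothetical character-device read routine could use uiomove() to hand
 * a static kernel buffer to the caller.  The device, function and buffer
 * names are invented for illustration only.
 */
#ifdef UIO_EXAMPLES
static char example_msg[] = "hello from the example device\n";

static int
example_read(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	int amt;

	/* Bytes of the message left beyond the caller's current offset. */
	amt = sizeof(example_msg) - 1;
	if (uio->uio_offset >= amt)
		return (0);		/* EOF */
	amt -= uio->uio_offset;
	if (amt > uio->uio_resid)
		amt = uio->uio_resid;

	/* uiomove() advances uio_offset and uio_resid; returns 0 or errno. */
	return (uiomove(example_msg + uio->uio_offset, amt, uio));
}
#endif /* UIO_EXAMPLES */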
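/*
 * uiomoveco() is a variant of uiomove() used by the optional VFS I/O
 * optimization: when ENABLE_VFS_IOOPT is compiled in, vfs_ioopt is set and
 * the read is page aligned in size, offset, source and destination, it has
 * vm_uiomove() share the pages of the backing object obj with the user
 * address space instead of copying the data.  Otherwise it copies just as
 * uiomove() does.
 */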
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - switchticks >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
					((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
					((uio->uio_offset & PAGE_MASK) == 0) &&
					((((intptr_t) cp) & PAGE_MASK) == 0)) {
						error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
								uio->uio_offset, cnt,
								(vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

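/*
 * uioread() tries to satisfy a read purely by page remapping: for a
 * page-aligned user-space request it moves whole pages from obj with
 * vm_uiomove(), accumulates the number of bytes actually moved in *nread,
 * and leaves any unaligned remainder for the caller to copy conventionally.
 * It does nothing unless vfs_ioopt is set to 2 or higher.
 */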
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				 ((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - switchticks >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
						uio->uio_offset, cnt,
						(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
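
/*
 * Example (not part of the original source): a sketch of handing buffered
 * input to a reader one byte at a time with ureadc().  The ring buffer and
 * function names are invented for illustration; ureadc() panics on an
 * exhausted uio, so uio_resid is checked before each call.
 */
#ifdef UIO_EXAMPLES
struct example_ring {
	u_char	buf[128];
	int	head, tail;		/* tail chases head */
};

static int
example_drain(rp, uio)
	struct example_ring *rp;
	struct uio *uio;
{
	int c, error = 0;

	while (uio->uio_resid > 0 && rp->tail != rp->head) {
		c = rp->buf[rp->tail];
		if ((error = ureadc(c, uio)) != 0)
			break;
		rp->tail = (rp->tail + 1) % sizeof(rp->buf);
	}
	return (error);
}
#endif /* UIO_EXAMPLES */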

#ifdef vax	/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
	struct uio *uio;
{
	register struct iovec *iov;
	register int c;

	if (uio->uio_resid <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iov++;
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		c = fubyte(iov->iov_base);
		break;

	case UIO_SYSSPACE:
		c = *(u_char *) iov->iov_base;
		break;

	case UIO_USERISPACE:
		c = fuibyte(iov->iov_base);
		break;
	}
	if (c < 0)
		return (-1);
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (c);
}
#endif /* vax */

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
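
/*
 * Example (not part of the original source): typical use of hashinit().
 * The caller keeps the returned mask and selects a bucket with
 * "hash & mask".  The entry structure, list names and the use of M_TEMP
 * below are stand-ins for a real subsystem's own definitions.
 */
#ifdef UIO_EXAMPLES
struct example_entry {
	LIST_ENTRY(example_entry) link;
	u_long	key;
};
static LIST_HEAD(example_head, example_entry) *example_tbl;
static u_long example_mask;

static void
example_hash_setup(nelements)
	int nelements;
{
	struct example_entry *ep;

	example_tbl = hashinit(nelements, M_TEMP, &example_mask);

	/* Insert one entry; the mask confines the hash to a valid bucket. */
	ep = malloc(sizeof(*ep), M_TEMP, M_WAITOK);
	ep->key = 12345;
	LIST_INSERT_HEAD(&example_tbl[ep->key & example_mask], ep, link);
}
#endif /* UIO_EXAMPLES */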

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
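
/*
 * Example (not part of the original source): phashinit() returns a table
 * with a prime number of buckets, so a bucket is selected with a modulo
 * rather than a mask.  This sketch reuses the example_entry type from the
 * hashinit() example above; all names are illustrative only.
 */
#ifdef UIO_EXAMPLES
static LIST_HEAD(pexample_head, example_entry) *pexample_tbl;
static u_long pexample_nentries;

static void
pexample_hash_setup(nelements)
	int nelements;
{
	pexample_tbl = phashinit(nelements, M_TEMP, &pexample_nentries);
	/* A key k now belongs in bucket &pexample_tbl[k % pexample_nentries]. */
}
#endif /* UIO_EXAMPLES */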

/*
 * uio_yield() lets a long uiomove() loop give up the CPU: the current
 * process is put back on the run queue at its user priority and
 * mi_switch() is called so that another process may run.
 */
static void
uio_yield()
{
	struct proc *p;
	int s;

	p = curproc;
	p->p_priority = p->p_usrpri;
	s = splhigh();
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	splx(s);
}