subr_hash.c revision 78431
1/*
2 * Copyright (c) 1982, 1986, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the University of
21 *	California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 *    may be used to endorse or promote products derived from this software
24 *    without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
39 * $FreeBSD: head/sys/kern/kern_subr.c 78431 2001-06-18 20:24:54Z wollman $
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/ktr.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/proc.h>
49#include <sys/malloc.h>
50#include <sys/resourcevar.h>
51#include <sys/sysctl.h>
52#include <sys/vnode.h>
53
54#include <vm/vm.h>
55#include <vm/vm_page.h>
56#include <vm/vm_map.h>
57
/* Forward declaration; uio_yield() is defined near the bottom of this file. */
static void	uio_yield __P((void));

/*
 * Export the iovec limit as the read-only sysctl kern.iov_max, which
 * backs userland's sysconf(_SC_IOV_MAX).
 */
SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
62
63int
64uiomove(cp, n, uio)
65	register caddr_t cp;
66	register int n;
67	register struct uio *uio;
68{
69	register struct iovec *iov;
70	u_int cnt;
71	int error = 0;
72	int save = 0;
73
74	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
75	    ("uiomove: mode"));
76	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
77	    ("uiomove proc"));
78
79	if (curproc) {
80		save = curproc->p_flag & P_DEADLKTREAT;
81		curproc->p_flag |= P_DEADLKTREAT;
82	}
83
84	while (n > 0 && uio->uio_resid) {
85		iov = uio->uio_iov;
86		cnt = iov->iov_len;
87		if (cnt == 0) {
88			uio->uio_iov++;
89			uio->uio_iovcnt--;
90			continue;
91		}
92		if (cnt > n)
93			cnt = n;
94
95		switch (uio->uio_segflg) {
96
97		case UIO_USERSPACE:
98		case UIO_USERISPACE:
99			if (ticks - PCPU_GET(switchticks) >= hogticks)
100				uio_yield();
101			if (uio->uio_rw == UIO_READ)
102				error = copyout(cp, iov->iov_base, cnt);
103			else
104				error = copyin(iov->iov_base, cp, cnt);
105			if (error)
106				break;
107			break;
108
109		case UIO_SYSSPACE:
110			if (uio->uio_rw == UIO_READ)
111				bcopy((caddr_t)cp, iov->iov_base, cnt);
112			else
113				bcopy(iov->iov_base, (caddr_t)cp, cnt);
114			break;
115		case UIO_NOCOPY:
116			break;
117		}
118		iov->iov_base += cnt;
119		iov->iov_len -= cnt;
120		uio->uio_resid -= cnt;
121		uio->uio_offset += cnt;
122		cp += cnt;
123		n -= cnt;
124	}
125	if (curproc)
126		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
127	return (error);
128}
129
/*
 * uiomoveco:
 *
 *	Like uiomove(), but associated with the VM object `obj' backing
 *	the data.  With ENABLE_VFS_IOOPT compiled in and vfs_ioopt set,
 *	page-aligned reads are handed to vm_uiomove() (page mapping
 *	instead of byte copy); otherwise plain copyout()/copyin() is used.
 *	Unlike uiomove(), this returns an error immediately without
 *	advancing the iovec for the failed chunk.
 */
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			/* Yield if we have been hogging the CPU. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				/*
				 * Use the VM fast path only when length,
				 * user address, file offset and kernel
				 * address are all page-aligned.
				 */
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
					((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
					((uio->uio_offset & PAGE_MASK) == 0) &&
					((((intptr_t) cp) & PAGE_MASK) == 0)) {
						error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
								uio->uio_offset, cnt,
								(vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}
202
#ifdef ENABLE_VFS_IOOPT

/*
 * uioread:
 *
 *	Optimized read path: satisfy as much of an (up to) n-byte read as
 *	possible by moving whole pages from the VM object `obj' into the
 *	user's address space with vm_uiomove(), instead of copying bytes.
 *	Only whole, page-aligned chunks of UIO_USERSPACE iovecs qualify;
 *	the first non-qualifying chunk stops the loop and any remainder is
 *	left for the caller to handle by conventional copy.
 *
 *	*nread is set to the number of bytes actually moved.  Returns 0
 *	(including when vfs_ioopt < 2, which disables this path entirely)
 *	or an error from vm_uiomove().
 */
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				 ((uio->uio_offset & PAGE_MASK) == 0) ) {

			/* Less than a page left: fall back to normal copy. */
			if (cnt < PAGE_SIZE)
				break;

			/* Round the chunk down to a whole page multiple. */
			cnt &= ~PAGE_MASK;

			/* Yield if we have been hogging the CPU. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			/*
			 * NOTE(review): npagesmoved is not initialized
			 * before this call — presumably vm_uiomove() sets
			 * it on every path, including failure; verify.
			 */
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
						uio->uio_offset, cnt,
						(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			/* Account only for the pages actually moved. */
			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			/* Unaligned chunk: stop and let the caller copy. */
			break;
		}
	}
	return error;
}

#endif
272
273/*
274 * Give next character to user as result of read.
275 */
276int
277ureadc(c, uio)
278	register int c;
279	register struct uio *uio;
280{
281	register struct iovec *iov;
282
283again:
284	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
285		panic("ureadc");
286	iov = uio->uio_iov;
287	if (iov->iov_len == 0) {
288		uio->uio_iovcnt--;
289		uio->uio_iov++;
290		goto again;
291	}
292	switch (uio->uio_segflg) {
293
294	case UIO_USERSPACE:
295		if (subyte(iov->iov_base, c) < 0)
296			return (EFAULT);
297		break;
298
299	case UIO_SYSSPACE:
300		*iov->iov_base = c;
301		break;
302
303	case UIO_USERISPACE:
304		if (suibyte(iov->iov_base, c) < 0)
305			return (EFAULT);
306		break;
307	case UIO_NOCOPY:
308		break;
309	}
310	iov->iov_base++;
311	iov->iov_len--;
312	uio->uio_resid--;
313	uio->uio_offset++;
314	return (0);
315}
316
317/*
318 * General routine to allocate a hash table.
319 */
320void *
321hashinit(elements, type, hashmask)
322	int elements;
323	struct malloc_type *type;
324	u_long *hashmask;
325{
326	long hashsize;
327	LIST_HEAD(generic, generic) *hashtbl;
328	int i;
329
330	if (elements <= 0)
331		panic("hashinit: bad elements");
332	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
333		continue;
334	hashsize >>= 1;
335	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
336	for (i = 0; i < hashsize; i++)
337		LIST_INIT(&hashtbl[i]);
338	*hashmask = hashsize - 1;
339	return (hashtbl);
340}
341
/*
 * Prime bucket counts for phashinit(), in ascending order.  The leading
 * 1 is the fallback when the requested element count is below 13.
 */
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))
346
347/*
348 * General routine to allocate a prime number sized hash table.
349 */
350void *
351phashinit(elements, type, nentries)
352	int elements;
353	struct malloc_type *type;
354	u_long *nentries;
355{
356	long hashsize;
357	LIST_HEAD(generic, generic) *hashtbl;
358	int i;
359
360	if (elements <= 0)
361		panic("phashinit: bad elements");
362	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
363		i++;
364		if (i == NPRIMES)
365			break;
366		hashsize = primes[i];
367	}
368	hashsize = primes[i - 1];
369	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
370	for (i = 0; i < hashsize; i++)
371		LIST_INIT(&hashtbl[i]);
372	*nentries = hashsize;
373	return (hashtbl);
374}
375
/*
 * uio_yield:
 *
 *	Voluntarily give up the CPU in the middle of a long uio copy so
 *	one process cannot hog the processor (callers check hogticks
 *	before invoking this).  The process is put back on the run queue
 *	at its user priority and mi_switch() picks the next runner.
 */
static void
uio_yield()
{
	struct proc *p;
	int s;

	p = curproc;
	s = splhigh();
	mtx_lock_spin(&sched_lock);
	/* Release Giant without switching; reacquired after mi_switch(). */
	DROP_GIANT_NOSWITCH();
	/* Requeue at user priority so we don't retain a kernel boost. */
	p->p_pri.pri_level = p->p_pri.pri_user;
	setrunqueue(p);
	/* Charge this as an involuntary context switch. */
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
	splx(s);
}
394
395int
396copyinfrom(const void *src, void *dst, size_t len, int seg)
397{
398	int error = 0;
399
400	switch (seg) {
401	case UIO_USERSPACE:
402		error = copyin(src, dst, len);
403		break;
404	case UIO_SYSSPACE:
405		bcopy(src, dst, len);
406		break;
407	default:
408		panic("copyinfrom: bad seg %d\n", seg);
409	}
410	return (error);
411}
412
413int
414copyinstrfrom(const void *src, void *dst, size_t len, size_t *copied, int seg)
415{
416	int error = 0;
417
418	switch (seg) {
419	case UIO_USERSPACE:
420		error = copyinstr(src, dst, len, copied);
421		break;
422	case UIO_SYSSPACE:
423		error = copystr(src, dst, len, copied);
424		break;
425	default:
426		panic("copyinstrfrom: bad seg %d\n", seg);
427	}
428	return (error);
429}
430