kern_subr.c revision 72376
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_subr.c 72376 2001-02-12 00:20:08Z jake $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

static void	uio_yield __P((void));

int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;	/* don't advance past a faulted copy */
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}
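
/*
 * Illustrative sketch (not part of this file): a character device read
 * routine typically drains a driver-private buffer into the caller's
 * address space with uiomove(), which advances uio_offset and uio_resid
 * itself.  The foo_* names, FOO_BUFSIZE and the softc layout below are
 * hypothetical.
 */
#if 0
static int
foo_read(dev_t dev, struct uio *uio, int ioflag)
{
	struct foo_softc *sc = dev->si_drv1;	/* hypothetical softc */
	int amnt, error = 0;

	while (uio->uio_resid > 0) {
		/* Copy whatever part of the buffer lies beyond the offset. */
		amnt = min(uio->uio_resid, FOO_BUFSIZE - uio->uio_offset);
		if (amnt <= 0)
			break;
		error = uiomove(sc->sc_buf + uio->uio_offset, amnt, uio);
		if (error)
			break;
	}
	return (error);
}
#endif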

int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_procp == curproc,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
					((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
					((uio->uio_offset & PAGE_MASK) == 0) &&
					((((intptr_t) cp) & PAGE_MASK) == 0)) {
						error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
								uio->uio_offset, cnt,
								(vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				 ((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
						uio->uio_offset, cnt,
						(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
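
/*
 * Illustrative sketch (not part of this file): ureadc() is handy when a
 * read routine generates its output a character at a time.  The function
 * name and the banner string below are hypothetical; handling of
 * uio_offset is omitted for brevity.
 */
#if 0
static int
foo_read_banner(struct uio *uio)
{
	const char *p = "hello, world\n";
	int error = 0;

	while (*p != '\0' && uio->uio_resid > 0) {
		error = ureadc(*p++, uio);
		if (error)
			break;
	}
	return (error);
}
#endif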

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
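
/*
 * Illustrative sketch (not part of this file): callers keep the table
 * pointer and the mask filled in by hashinit() and index the table with
 * "hash value & mask", since the table size is a power of two.  The
 * foo_* names, the foohead list head and M_FOOHASH are hypothetical.
 */
#if 0
static LIST_HEAD(foohead, foo) *foo_hashtbl;
static u_long foo_hashmask;		/* table size - 1 */
#define	FOO_HASH(key)	(&foo_hashtbl[(key) & foo_hashmask])

static void
foo_init(int desired)
{
	foo_hashtbl = hashinit(desired, M_FOOHASH, &foo_hashmask);
}
#endif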

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
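
/*
 * Illustrative sketch (not part of this file): phashinit() returns the
 * prime table size through *nentries rather than a power-of-two mask, so
 * lookups index the table with a modulo instead of a bitwise AND.  The
 * bar_* names, the barhead list head and M_BARHASH are hypothetical.
 */
#if 0
static LIST_HEAD(barhead, bar) *bar_hashtbl;
static u_long bar_nentries;		/* prime table size */
#define	BAR_HASH(key)	(&bar_hashtbl[(key) % bar_nentries])

static void
bar_init(int desired)
{
	bar_hashtbl = phashinit(desired, M_BARHASH, &bar_nentries);
}
#endif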

static void
uio_yield()
{
	struct proc *p;
	int s;

	p = curproc;
	s = splhigh();
	mtx_lock_spin(&sched_lock);
	DROP_GIANT_NOSWITCH();
	p->p_pri.pri_level = p->p_pri.pri_user;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
	splx(s);
}