subr_hash.c revision 83959
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_subr.c 83959 2001-09-26 06:54:32Z dillon $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
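
/*
 * Illustrative sketch (not part of the original revision): the limit
 * exported above is what a userland program sees via sysconf(_SC_IOV_MAX),
 * e.g. when sizing an iovec array for writev(2).  This snippet would be
 * compiled in userland, not in the kernel, and is included only to show
 * the consumer side of the sysctl.
 */
#if 0
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long iov_max;

	/* Ask how many iovec entries one readv(2)/writev(2) may carry. */
	iov_max = sysconf(_SC_IOV_MAX);
	if (iov_max == -1)
		iov_max = _XOPEN_IOV_MAX;	/* conservative fallback */
	printf("IOV_MAX = %ld\n", iov_max);
	return (0);
}
#endif
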
/*
 * Transfer up to n bytes between the kernel buffer cp and the region
 * described by uio, in the direction given by uio->uio_rw, advancing the
 * uio (iovec, resid, offset) as data is moved.  Returns 0 on success or
 * an errno value from copyin()/copyout().
 */
int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	struct thread *td = curthread;
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	/*
	 * Give the thread deadlock treatment while it may fault on user
	 * pages; the previous flag state is saved and restored below.
	 */
	if (td) {
		mtx_lock_spin(&sched_lock);
		save = td->td_flags & TDF_DEADLKTREAT;
		td->td_flags |= TDF_DEADLKTREAT;
		mtx_unlock_spin(&sched_lock);
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			/* Yield the CPU periodically during long copies. */
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out; /* abort; don't let a later copy hide the error */
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
out:
	if (td != curthread)
		printf("uiomove: IT CHANGED!\n");
	td = curthread;	/* Might things have changed in copyin/copyout? */
	if (td) {
		mtx_lock_spin(&sched_lock);
		td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
		mtx_unlock_spin(&sched_lock);
	}
	return (error);
}
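
/*
 * Illustrative sketch (not part of the original revision): a minimal read
 * routine that hands a kernel buffer to the caller with uiomove().  The
 * function name and the way the buffer is filled are hypothetical; only
 * the uiomove() contract documented above is assumed.
 */
#if 0
static int
example_read(struct uio *uio)
{
	char buf[128];
	int len;

	/* Move at most one buffer's worth per call; uiomove() advances uio. */
	len = imin((int)sizeof(buf), uio->uio_resid);
	bzero(buf, len);		/* stand-in for real device data */
	return (uiomove(buf, len, uio));
}
#endif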

/*
 * Variant of uiomove() used with the optional VFS I/O optimization.  When
 * ENABLE_VFS_IOOPT is configured and a read transfer is page aligned, the
 * data is handed to the user by mapping pages from obj with vm_uiomove()
 * instead of copying; otherwise it behaves like uiomove().
 */
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				/*
				 * Page-aligned, page-sized reads can be
				 * mapped rather than copied.
				 */
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
				    ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				    ((uio->uio_offset & PAGE_MASK) == 0) &&
				    ((((intptr_t) cp) & PAGE_MASK) == 0)) {
					error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
					    uio->uio_offset, cnt,
					    (vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

/*
 * Read path for the VFS I/O optimization (vfs_ioopt >= 2): map whole,
 * page-aligned pages from obj directly into the user address space with
 * vm_uiomove() and report the number of bytes handled in *nread.
 */
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return (0);

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
		    ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
		    ((uio->uio_offset & PAGE_MASK) == 0)) {

			if (cnt < PAGE_SIZE)
				break;

			/* Only move whole pages. */
			cnt &= ~PAGE_MASK;

			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return (error);
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
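
/*
 * Illustrative sketch (not part of the original revision): ureadc() is the
 * per-character counterpart of uiomove(), typically used by tty-style read
 * paths.  The producer function example_next_char() is hypothetical and
 * stands in for whatever supplies the next byte.
 */
#if 0
static int example_next_char(void);	/* hypothetical data source */

static int
example_char_read(struct uio *uio)
{
	int c, error;

	error = 0;
	/* Push characters one at a time until the request is satisfied. */
	while (uio->uio_resid > 0 && (c = example_next_char()) != -1) {
		error = ureadc(c, uio);
		if (error)
			break;
	}
	return (error);
}
#endif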

/*
 * General routine to allocate a hash table.  The table size is the largest
 * power of two not greater than elements; *hashmask is set to size - 1 so
 * callers can reduce a hash value with a bitwise AND.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}
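
/*
 * Illustrative sketch (not part of the original revision): typical use of
 * hashinit().  The node type, bucket lookup helper and use of M_TEMP are
 * hypothetical; the masking idiom follows the contract described above.
 */
#if 0
struct example_node {
	LIST_ENTRY(example_node) link;
	u_long key;
};

static LIST_HEAD(example_head, example_node) *example_hashtbl;
static u_long example_hashmask;

static void
example_hash_setup(int desired)
{
	/* Table size becomes the largest power of two <= desired. */
	example_hashtbl = hashinit(desired, M_TEMP, &example_hashmask);
}

static struct example_head *
example_hash_bucket(u_long key)
{
	/* Mask rather than modulo: the bucket count is a power of two. */
	return (&example_hashtbl[key & example_hashmask]);
}
#endif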

static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}
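
/*
 * Illustrative sketch (not part of the original revision): use of
 * phashinit().  Names and the M_TEMP malloc type are hypothetical; the
 * point is that a prime-sized table is indexed with the modulo operator
 * (using *nentries) rather than a power-of-two mask.
 */
#if 0
struct pexample_node {
	LIST_ENTRY(pexample_node) link;
	u_long key;
};

static LIST_HEAD(pexample_head, pexample_node) *pexample_hashtbl;
static u_long pexample_nentries;

static void
pexample_hash_setup(int desired)
{
	pexample_hashtbl = phashinit(desired, M_TEMP, &pexample_nentries);
}

static struct pexample_head *
pexample_hash_bucket(u_long key)
{
	/* Prime bucket count, so reduce the key with modulo. */
	return (&pexample_hashtbl[key % pexample_nentries]);
}
#endif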

/*
 * Voluntarily give up the CPU: drop the thread back to its user priority
 * and rejoin the run queue.  Called from uiomove() and related routines
 * when a transfer has held the processor for at least hogticks.
 */
void
uio_yield()
{
	struct thread *td;

	td = curthread;
	mtx_lock_spin(&sched_lock);
	DROP_GIANT_NOSWITCH();
	td->td_ksegrp->kg_pri.pri_level = td->td_ksegrp->kg_pri.pri_user;
	setrunqueue(td);
	td->td_proc->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}

/*
 * Copy len bytes from src to the kernel buffer dst, where src lies in the
 * address space named by seg (UIO_USERSPACE or UIO_SYSSPACE).
 */
int
copyinfrom(const void *src, void *dst, size_t len, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}

/*
 * Like copyinfrom(), but copy a NUL-terminated string of at most len bytes
 * and, if copied is non-NULL, report the number of bytes (including the
 * NUL) actually copied.
 */
int
copyinstrfrom(const void *src, void *dst, size_t len, size_t *copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}
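
/*
 * Illustrative sketch (not part of the original revision): a helper that
 * accepts a path either from userland or from the kernel, mirroring how
 * callers pass UIO_USERSPACE or UIO_SYSSPACE to copyinstrfrom().  The
 * helper name and MAXPATHLEN-sized destination buffer are assumptions.
 */
#if 0
static int
example_copy_path(const char *path, int seg, char *kpath)
{
	size_t done;

	/* kpath must provide at least MAXPATHLEN bytes of storage. */
	return (copyinstrfrom(path, kpath, MAXPATHLEN, &done, seg));
}
#endif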