kern_subr.c revision 138781
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_subr.c 138781 2004-12-13 06:24:14Z alc $");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
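/*
 * Illustrative note (not in the original source): userland can query the
 * limit exported above via sysconf(3) or sysctl(3), along these lines:
 *
 *	long iov_max = sysconf(_SC_IOV_MAX);
 *
 * or, from the shell:
 *
 *	$ sysctl kern.iov_max
 */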
#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
	    VM_PROT_WRITE, &entry, &uobject,
	    &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		vm_page_lock_queues();
		if (vm_page_sleep_if_busy(user_pg, 1, "vm_pgmoveco"))
			goto retry;
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

int
uiomove(void *cp, int n, struct uio *uio)
{
	struct thread *td = curthread;
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling uiomove()");

	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
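/*
 * Illustrative sketch (not in the original source): the typical consumer of
 * uiomove() is a read or write routine exposing a kernel buffer to userland.
 * The driver, softc, and buffer below are hypothetical; the point is only
 * the call pattern, where uiomove() advances uio_offset and uio_resid.
 */
#if 0
static int
foo_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct foo_softc *sc = dev->si_drv1;	/* hypothetical softc */
	int amount;

	/* Clamp the transfer to what remains past the current offset. */
	if (uio->uio_offset >= sc->buf_len)
		return (0);
	amount = MIN(uio->uio_resid, sc->buf_len - uio->uio_offset);
	return (uiomove(sc->buf + uio->uio_offset, amount, uio));
}
#endif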
/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	unsigned int offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > INT_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}

#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
#endif /* ZERO_COPY_SOCKETS */
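/*
 * Illustrative sketch (not in the original source): uiomove_frombuf() above
 * suits read handlers that export one complete, bounded kernel buffer, since
 * the offset and length checks are derived from the uio itself.  The device
 * handler and status buffer here are hypothetical.
 */
#if 0
static char foo_status[] = "ok\n";

static int
foo_status_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	/* Offset validation and clamping happen inside the wrapper. */
	return (uiomove_frombuf(foo_status, strlen(foo_status), uio));
}
#endif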
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}

void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl, *hp;

	hashtbl = vhashtbl;
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp))
			panic("hashdestroy: hash not empty");
	free(hashtbl, type);
}
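/*
 * Illustrative sketch (not in the original source): hashinit() sizes the
 * table to the largest power of two not greater than "elements" and hands
 * back a mask, so chains are selected with "hash & mask" rather than a
 * modulo.  The element type, key, and link field below are hypothetical.
 */
#if 0
static void
foo_hash_example(void)
{
	LIST_HEAD(foo_head, foo) *foo_table;
	struct foo *elem;		/* hypothetical element */
	u_long foo_mask, key;

	foo_table = hashinit(128, M_TEMP, &foo_mask);	/* foo_mask == 127 */
	LIST_INSERT_HEAD(&foo_table[key & foo_mask], elem, foo_link);
	/* ... lookups via LIST_FOREACH(), removals via LIST_REMOVE() ... */
	hashdestroy(foo_table, M_TEMP, foo_mask);	/* chains must be empty */
}
#endif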
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define	NPRIMES	(sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}

void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	mtx_lock_spin(&sched_lock);
	DROP_GIANT();
	sched_prio(td, td->td_ksegrp->kg_user_pri);	/* XXXKSE */
	mi_switch(SW_INVOL, NULL);
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();
}

int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}

int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}

int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof (struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}

int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > INT_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}
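/*
 * Illustrative sketch (not in the original source): a syscall-style consumer
 * of copyinuio().  The caller owns the returned uio and frees it with M_IOV
 * once the transfer completes.  The handler below is hypothetical.
 */
#if 0
static int
foo_writev(struct thread *td, struct iovec *uiovp, u_int iovcnt)
{
	struct uio *auio;
	int error;

	/* Validates iovcnt, copies in the iovec array, and sums uio_resid. */
	error = copyinuio(uiovp, iovcnt, &auio);
	if (error)
		return (error);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	/* ... pass auio to the object being written, e.g. a vnode ... */
	free(auio, M_IOV);
	return (error);
}
#endif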