subr_hash.c revision 196454
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_subr.c 196454 2009-08-23 09:55:06Z rpaulo $");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
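
/*
 * The sysctl above makes the UIO_MAXIOV limit visible to userland, where
 * POSIX exposes the same value as sysconf(_SC_IOV_MAX).  A minimal
 * userland sketch (illustrative only, not part of the kernel):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		printf("IOV_MAX: %ld\n", sysconf(_SC_IOV_MAX));
 *		return (0);
 *	}
 */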

#ifdef ZERO_COPY_SOCKETS
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
	    VM_PROT_WRITE, &entry, &uobject,
	    &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock_queues();
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
#endif /* ZERO_COPY_SOCKETS */

int
uiomove(void *cp, int n, struct uio *uio)
{
	struct thread *td = curthread;
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling uiomove()");

	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
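
/*
 * A typical uiomove() consumer is a character device read routine that
 * copies out of a driver-owned buffer; uiomove() itself advances
 * uio_offset and shrinks uio_resid, so the caller only has to bound each
 * copy against the end of its buffer.  A minimal sketch, assuming a
 * hypothetical driver with state foo_buf/foo_len (not part of this file):
 *
 *	static int
 *	foo_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		size_t amt;
 *		int error = 0;
 *
 *		while (uio->uio_resid > 0 && error == 0) {
 *			if (uio->uio_offset >= foo_len)
 *				break;
 *			amt = MIN(uio->uio_resid, foo_len - uio->uio_offset);
 *			error = uiomove(foo_buf + uio->uio_offset, amt, uio);
 *		}
 *		return (error);
 *	}
 */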

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a
 * runtime assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
	unsigned int offset, n;

	if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
	    (offset = uio->uio_offset) != uio->uio_offset)
		return (EINVAL);
	if (buflen <= 0 || offset >= buflen)
		return (0);
	if ((n = buflen - offset) > INT_MAX)
		return (EINVAL);
	return (uiomove((char *)buf + offset, n, uio));
}
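
/*
 * uiomove_frombuf() suits handlers that expose a complete, fixed-size
 * kernel object and want the offset/length clamping done for them.  A
 * minimal sketch of a hypothetical read handler (not part of this file):
 *
 *	static const char foo_banner[] = "foo 1.0\n";
 *
 *	static int
 *	foo_banner_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		return (uiomove_frombuf(__DECONST(void *, foo_banner),
 *		    sizeof(foo_banner) - 1, uio));
 *	}
 *
 * Reads past the end of the buffer transfer 0 bytes, so callers see EOF
 * rather than an error.
 */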

#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
	struct iovec *iov;
	int error;

	iov = uio->uio_iov;
	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && ((cnt & PAGE_MASK) == 0)
		 && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		 && ((uio->uio_offset & PAGE_MASK) == 0)
		 && ((((intptr_t) cp) & PAGE_MASK) == 0)
		 && (disposable != 0)) {
			/* SOCKET: use page-trading */
			/*
			 * We only want to call vm_pgmoveco() on
			 * disposable pages, since it gives the
			 * kernel page to the userland process.
			 */
			error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
			    (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

			/*
			 * If we get an error back, attempt
			 * to use copyout() instead.  The
			 * disposable page should be freed
			 * automatically if we weren't able to move
			 * it into userland.
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
	return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
	struct iovec *iov;
	u_int cnt;
	int error;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();

			error = userspaceco(cp, cnt, uio, disposable);

			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
	return (0);
}
#endif /* ZERO_COPY_SOCKETS */

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	struct iovec *iov;
	char *iov_base;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling ureadc()");

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		iov_base = iov->iov_base;
		*iov_base = c;
		iov->iov_base = iov_base;
		break;

	case UIO_NOCOPY:
		break;
	}
	iov->iov_base = (char *)iov->iov_base + 1;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

/*
 * General routine to allocate a hash table with control of memory flags.
 */
void *
hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
    int flags)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");

	/* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
	KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
	    ("Bad flags (0x%x) passed to hashinit_flags", flags));

	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;

	if (flags & HASH_NOWAIT)
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
		    type, M_NOWAIT);
	else
		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
		    type, M_WAITOK);

	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}

/*
 * Allocate and initialize a hash table with default flag: may sleep.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{

	return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
}

void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl, *hp;

	hashtbl = vhashtbl;
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		if (!LIST_EMPTY(hp))
			panic("hashdestroy: hash not empty");
	free(hashtbl, type);
}
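
/*
 * hashinit() and hashdestroy() bracket the life cycle of a power-of-two
 * bucket array; the returned mask, not the element count, selects a
 * bucket.  A minimal sketch for a hypothetical subsystem (names are
 * illustrative, not part of this file):
 *
 *	static LIST_HEAD(foohead, foo) *foo_hashtbl;
 *	static u_long foo_hashmask;
 *
 *	foo_hashtbl = hashinit(128, M_TEMP, &foo_hashmask);
 *	head = &foo_hashtbl[key & foo_hashmask];
 *	LIST_INSERT_HEAD(head, fp, foo_link);
 *	...
 *	LIST_REMOVE(fp, foo_link);
 *	hashdestroy(foo_hashtbl, M_TEMP, foo_hashmask);
 *
 * hashdestroy() panics if any bucket is still populated, so every entry
 * must be removed before the table is freed.
 */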

static const int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531,
			2039, 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143,
			6653, 7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define	NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}

void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	sched_prio(td, td->td_user_pri);
	mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyin(src, dst, len);
		break;
	case UIO_SYSSPACE:
		bcopy(src, dst, len);
		break;
	default:
		panic("copyinfrom: bad seg %d\n", seg);
	}
	return (error);
}

int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
	int error = 0;

	switch (seg) {
	case UIO_USERSPACE:
		error = copyinstr(src, dst, len, copied);
		break;
	case UIO_SYSSPACE:
		error = copystr(src, dst, len, copied);
		break;
	default:
		panic("copyinstrfrom: bad seg %d\n", seg);
	}
	return (error);
}

int
copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
	u_int iovlen;

	*iov = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (error);
	iovlen = iovcnt * sizeof (struct iovec);
	*iov = malloc(iovlen, M_IOV, M_WAITOK);
	error = copyin(iovp, *iov, iovlen);
	if (error) {
		free(*iov, M_IOV);
		*iov = NULL;
	}
	return (error);
}

int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > INT_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

struct uio *
cloneuio(struct uio *uiop)
{
	struct uio *uio;
	int iovlen;

	iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	*uio = *uiop;
	uio->uio_iov = (struct iovec *)(uio + 1);
	bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
	return (uio);
}
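
/*
 * copyinuio() is how syscalls that accept a user iovec array (the
 * readv()/writev() family, for example) build a kernel struct uio.  A
 * minimal sketch of a hypothetical handler (uap fields and the consumer
 * function are assumed, not part of this file):
 *
 *	struct uio *auio;
 *	int error;
 *
 *	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
 *	if (error)
 *		return (error);
 *	error = kern_foo_consume(td, auio);
 *	free(auio, M_IOV);
 *	return (error);
 *
 * Because the iovec array is allocated in the same malloc block as the
 * uio header (and cloneuio() copies it the same way), a single free(9)
 * of the uio releases both.
 */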