/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

struct buf *swbuf;

static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return (FALSE);
}

static void
dead_pager_dealloc(vm_object_t object)
{
}

static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};
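
/*
 * pagertab is indexed by objtype_t, so the entries above must stay in
 * the same order as the OBJT_* constants in <vm/vm_object.h>.  A minimal
 * sketch of the dispatch this enables (the same lookup performed by
 * vm_pager_allocate() below; illustrative only):
 *
 *	struct pagerops *ops;
 *
 *	ops = pagertab[OBJT_VNODE];		(yields &vnodepagerops)
 *	object = (*ops->pgo_alloc)(handle, size, prot, off, cred);
 */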

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
struct mtx_padalign pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[nitems(pagertab)]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}

/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}

static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object,
		    m[i]->object));
	}
#endif
}

/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If pager has replaced a page, assert that it had
		 * updated the array.
		 */
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}

int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}

/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */
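
/*
 * A minimal sketch of a typical synchronous caller of vm_pager_get_pages()
 * (hypothetical, heavily simplified code in the style of vm_fault(); as
 * vm_pager_assert_in() checks, the page must be exclusively busied, not
 * fully valid, and the object write-locked, and a real caller must also
 * unbusy the page afterwards):
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	if (m->valid != VM_PAGE_BITS_ALL &&
 *	    vm_pager_get_pages(object, &m, 1, NULL, NULL) != VM_PAGER_OK) {
 *		vm_page_lock(m);
 *		vm_page_free(m);
 *		vm_page_unlock(m);
 *		m = NULL;
 *	}
 *	VM_OBJECT_WUNLOCK(object);
 */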

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}

/*
 * Initialize a physical buffer.
 *
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{

	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_ioflags = 0;
	bp->b_iodone = NULL;
	bp->b_error = 0;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}

/*
 * Allocate a physical buffer.
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.  See the sketch after trypbuf()
 * below.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);

	initpbuf(bp);
	return (bp);
}
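
/*
 * A minimal sketch of the counter protocol described above getpbuf(),
 * for a hypothetical subsystem "foo" (the names and the nswbuf / 2
 * share are illustrative, not part of this file):
 *
 *	static int foo_pbuf_freecnt;
 *
 *	foo_pbuf_freecnt = nswbuf / 2;		(at initialization time)
 *	...
 *	bp = getpbuf(&foo_pbuf_freecnt);	(may sleep)
 *	... fill bp->b_data and perform the I/O ...
 *	relpbuf(bp, &foo_pbuf_freecnt);		(must use the same counter)
 */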

/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}

/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer.  i.e. the bp has not been linked into the
 * bufobj or ref-counted.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}

/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
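
/*
 * A minimal sketch of how the association routines above pair up in a
 * hypothetical pager I/O path (pbgetbo()/pbrelbo() pair the same way
 * when only a bufobj is at hand; foo_pbuf_freecnt is the illustrative
 * counter from the getpbuf() sketch above):
 *
 *	bp = getpbuf(&foo_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	... set up bp, issue the I/O, wait for completion ...
 *	pbrelvp(bp);
 *	relpbuf(bp, &foo_pbuf_freecnt);
 */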