vm_pager.c revision 1541
1289848Sjkim/* 2289848Sjkim * Copyright (c) 1991, 1993 3289848Sjkim * The Regents of the University of California. All rights reserved. 4289848Sjkim * 5289848Sjkim * This code is derived from software contributed to Berkeley by 6289848Sjkim * The Mach Operating System project at Carnegie-Mellon University. 7289848Sjkim * 8289848Sjkim * Redistribution and use in source and binary forms, with or without 9289848Sjkim * modification, are permitted provided that the following conditions 10289848Sjkim * are met: 11289848Sjkim * 1. Redistributions of source code must retain the above copyright 12289848Sjkim * notice, this list of conditions and the following disclaimer. 13289848Sjkim * 2. Redistributions in binary form must reproduce the above copyright 14289848Sjkim * notice, this list of conditions and the following disclaimer in the 15289848Sjkim * documentation and/or other materials provided with the distribution. 16289848Sjkim * 3. All advertising materials mentioning features or use of this software 17289848Sjkim * must display the following acknowledgement: 18289848Sjkim * This product includes software developed by the University of 19289848Sjkim * California, Berkeley and its contributors. 20289848Sjkim * 4. Neither the name of the University nor the names of its contributors 21289848Sjkim * may be used to endorse or promote products derived from this software 22289848Sjkim * without specific prior written permission. 23289848Sjkim * 24289848Sjkim * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25289848Sjkim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26289848Sjkim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27289848Sjkim * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28289848Sjkim * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29289848Sjkim * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30289848Sjkim * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31289848Sjkim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32289848Sjkim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33289848Sjkim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34289848Sjkim * SUCH DAMAGE. 35289848Sjkim * 36289848Sjkim * @(#)vm_pager.c 8.6 (Berkeley) 1/12/94 37289848Sjkim * 38289848Sjkim * 39289848Sjkim * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40289848Sjkim * All rights reserved. 41289848Sjkim * 42289848Sjkim * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43289848Sjkim * 44289848Sjkim * Permission to use, copy, modify and distribute this software and 45289848Sjkim * its documentation is hereby granted, provided that both the copyright 46289848Sjkim * notice and this permission notice appear in all copies of the 47289848Sjkim * software, derivative works or modified versions, and any portions 48289848Sjkim * thereof, and that both notices appear in supporting documentation. 49289848Sjkim * 50289848Sjkim * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51289848Sjkim * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52289848Sjkim * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

/*
 * Each pager type is compiled in only when the corresponding option is
 * configured; the extern declarations below name each pager's operations
 * vector (defined in that pager's own source file).
 */
#ifdef SWAPPAGER
extern struct pagerops swappagerops;
#endif

#ifdef VNODEPAGER
extern struct pagerops vnodepagerops;
#endif

#ifdef DEVPAGER
extern struct pagerops devicepagerops;
#endif

/*
 * Table of pager operation vectors, indexed by pager type (PG_SWAP,
 * PG_VNODE, PG_DEV).  A slot is NULL when that pager type was not
 * configured into this kernel.
 */
struct pagerops *pagertab[] = {
#ifdef SWAPPAGER
	&swappagerops,		/* PG_SWAP */
#else
	NULL,
#endif
#ifdef VNODEPAGER
	&vnodepagerops,		/* PG_VNODE */
#else
	NULL,
#endif
#ifdef DEVPAGER
	&devicepagerops,	/* PG_DEV */
#else
	NULL,
#endif
};
/* Number of entries in pagertab (includes NULL slots). */
int npagers = sizeof (pagertab) / sizeof (pagertab[0]);

/*
 * Default pager, used when an object has no specific pager type.
 * Must be set non-NULL by one of the pgo_init routines during pager
 * initialization; vm_pager_init() panics if it is still NULL afterward.
 */
struct pagerops *dfltpagerops = NULL;	/* default pager */

/*
 * Kernel address space for mapping pages.
113289848Sjkim * Used by pagers where KVAs are needed for IO. 114289848Sjkim * 115289848Sjkim * XXX needs to be large enough to support the number of pending async 116289848Sjkim * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size 117289848Sjkim * (MAXPHYS == 64k) if you want to get the most efficiency. 118289848Sjkim */ 119289848Sjkim#define PAGER_MAP_SIZE (4 * 1024 * 1024) 120289848Sjkim 121289848Sjkimvm_map_t pager_map; 122289848Sjkimboolean_t pager_map_wanted; 123289848Sjkimvm_offset_t pager_sva, pager_eva; 124289848Sjkim 125289848Sjkimvoid 126289848Sjkimvm_pager_init() 127289848Sjkim{ 128289848Sjkim struct pagerops **pgops; 129289848Sjkim 130 /* 131 * Allocate a kernel submap for tracking get/put page mappings 132 */ 133 pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva, 134 PAGER_MAP_SIZE, FALSE); 135 /* 136 * Initialize known pagers 137 */ 138 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) 139 if (pgops) 140 (*(*pgops)->pgo_init)(); 141 if (dfltpagerops == NULL) 142 panic("no default pager"); 143} 144 145/* 146 * Allocate an instance of a pager of the given type. 147 * Size, protection and offset parameters are passed in for pagers that 148 * need to perform page-level validation (e.g. the device pager). 149 */ 150vm_pager_t 151vm_pager_allocate(type, handle, size, prot, off) 152 int type; 153 caddr_t handle; 154 vm_size_t size; 155 vm_prot_t prot; 156 vm_offset_t off; 157{ 158 struct pagerops *ops; 159 160 ops = (type == PG_DFLT) ? 
dfltpagerops : pagertab[type]; 161 if (ops) 162 return ((*ops->pgo_alloc)(handle, size, prot, off)); 163 return (NULL); 164} 165 166void 167vm_pager_deallocate(pager) 168 vm_pager_t pager; 169{ 170 if (pager == NULL) 171 panic("vm_pager_deallocate: null pager"); 172 173 (*pager->pg_ops->pgo_dealloc)(pager); 174} 175 176int 177vm_pager_get_pages(pager, mlist, npages, sync) 178 vm_pager_t pager; 179 vm_page_t *mlist; 180 int npages; 181 boolean_t sync; 182{ 183 int rv; 184 185 if (pager == NULL) { 186 rv = VM_PAGER_OK; 187 while (npages--) 188 if (!vm_page_zero_fill(*mlist)) { 189 rv = VM_PAGER_FAIL; 190 break; 191 } else 192 mlist++; 193 return (rv); 194 } 195 return ((*pager->pg_ops->pgo_getpages)(pager, mlist, npages, sync)); 196} 197 198int 199vm_pager_put_pages(pager, mlist, npages, sync) 200 vm_pager_t pager; 201 vm_page_t *mlist; 202 int npages; 203 boolean_t sync; 204{ 205 if (pager == NULL) 206 panic("vm_pager_put_pages: null pager"); 207 return ((*pager->pg_ops->pgo_putpages)(pager, mlist, npages, sync)); 208} 209 210boolean_t 211vm_pager_has_page(pager, offset) 212 vm_pager_t pager; 213 vm_offset_t offset; 214{ 215 if (pager == NULL) 216 panic("vm_pager_has_page: null pager"); 217 return ((*pager->pg_ops->pgo_haspage)(pager, offset)); 218} 219 220/* 221 * Called by pageout daemon before going back to sleep. 222 * Gives pagers a chance to clean up any completed async pageing operations. 
223 */ 224void 225vm_pager_sync() 226{ 227 struct pagerops **pgops; 228 229 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) 230 if (pgops) 231 (*(*pgops)->pgo_putpages)(NULL, NULL, 0, FALSE); 232} 233 234void 235vm_pager_cluster(pager, offset, loff, hoff) 236 vm_pager_t pager; 237 vm_offset_t offset; 238 vm_offset_t *loff; 239 vm_offset_t *hoff; 240{ 241 if (pager == NULL) 242 panic("vm_pager_cluster: null pager"); 243 return ((*pager->pg_ops->pgo_cluster)(pager, offset, loff, hoff)); 244} 245 246void 247vm_pager_clusternull(pager, offset, loff, hoff) 248 vm_pager_t pager; 249 vm_offset_t offset; 250 vm_offset_t *loff; 251 vm_offset_t *hoff; 252{ 253 panic("vm_pager_nullcluster called"); 254} 255 256vm_offset_t 257vm_pager_map_pages(mlist, npages, canwait) 258 vm_page_t *mlist; 259 int npages; 260 boolean_t canwait; 261{ 262 vm_offset_t kva, va; 263 vm_size_t size; 264 vm_page_t m; 265 266 /* 267 * Allocate space in the pager map, if none available return 0. 268 * This is basically an expansion of kmem_alloc_wait with optional 269 * blocking on no space. 
270 */ 271 size = npages * PAGE_SIZE; 272 vm_map_lock(pager_map); 273 while (vm_map_findspace(pager_map, 0, size, &kva)) { 274 if (!canwait) { 275 vm_map_unlock(pager_map); 276 return (0); 277 } 278 pager_map_wanted = TRUE; 279 vm_map_unlock(pager_map); 280 (void) tsleep(pager_map, PVM, "pager_map", 0); 281 vm_map_lock(pager_map); 282 } 283 vm_map_insert(pager_map, NULL, 0, kva, kva + size); 284 vm_map_unlock(pager_map); 285 286 for (va = kva; npages--; va += PAGE_SIZE) { 287 m = *mlist++; 288#ifdef DEBUG 289 if ((m->flags & PG_BUSY) == 0) 290 panic("vm_pager_map_pages: page not busy"); 291 if (m->flags & PG_PAGEROWNED) 292 panic("vm_pager_map_pages: page already in pager"); 293#endif 294#ifdef DEBUG 295 m->flags |= PG_PAGEROWNED; 296#endif 297 pmap_enter(vm_map_pmap(pager_map), va, VM_PAGE_TO_PHYS(m), 298 VM_PROT_DEFAULT, TRUE); 299 } 300 return (kva); 301} 302 303void 304vm_pager_unmap_pages(kva, npages) 305 vm_offset_t kva; 306 int npages; 307{ 308 vm_size_t size = npages * PAGE_SIZE; 309 310#ifdef DEBUG 311 vm_offset_t va; 312 vm_page_t m; 313 int np = npages; 314 315 for (va = kva; np--; va += PAGE_SIZE) { 316 m = vm_pager_atop(va); 317 if (m->flags & PG_PAGEROWNED) 318 m->flags &= ~PG_PAGEROWNED; 319 else 320 printf("vm_pager_unmap_pages: %x(%x/%x) not owned\n", 321 m, va, VM_PAGE_TO_PHYS(m)); 322 } 323#endif 324 pmap_remove(vm_map_pmap(pager_map), kva, kva + size); 325 vm_map_lock(pager_map); 326 (void) vm_map_delete(pager_map, kva, kva + size); 327 if (pager_map_wanted) 328 wakeup(pager_map); 329 vm_map_unlock(pager_map); 330} 331 332vm_page_t 333vm_pager_atop(kva) 334 vm_offset_t kva; 335{ 336 vm_offset_t pa; 337 338 pa = pmap_extract(vm_map_pmap(pager_map), kva); 339 if (pa == 0) 340 panic("vm_pager_atop"); 341 return (PHYS_TO_VM_PAGE(pa)); 342} 343 344vm_pager_t 345vm_pager_lookup(pglist, handle) 346 register struct pagerlst *pglist; 347 caddr_t handle; 348{ 349 register vm_pager_t pager; 350 351 for (pager = pglist->tqh_first; pager; pager = 
pager->pg_list.tqe_next) 352 if (pager->pg_handle == handle) 353 return (pager); 354 return (NULL); 355} 356 357/* 358 * This routine gains a reference to the object. 359 * Explicit deallocation is necessary. 360 */ 361int 362pager_cache(object, should_cache) 363 vm_object_t object; 364 boolean_t should_cache; 365{ 366 if (object == NULL) 367 return (KERN_INVALID_ARGUMENT); 368 369 vm_object_cache_lock(); 370 vm_object_lock(object); 371 if (should_cache) 372 object->flags |= OBJ_CANPERSIST; 373 else 374 object->flags &= ~OBJ_CANPERSIST; 375 vm_object_unlock(object); 376 vm_object_cache_unlock(); 377 378 vm_object_deallocate(object); 379 380 return (KERN_SUCCESS); 381} 382