/*	$NetBSD: uvm_aobj.c,v 1.157 2023/02/24 11:03:13 riastradh Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 * Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.157 2023/02/24 11:03:13 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page_array.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define	UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
	((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
	((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
	((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)
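
/*
 * A worked example of the macros above (illustrative only): with
 * UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x23 belongs to the cluster
 * with tag 0x2, at in-cluster slot 0x3:
 *
 *	UAO_SWHASH_ELT_TAG(0x23)          == 0x23 >> 4       == 0x2
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x23) == 0x23 & (16 - 1) == 0x3
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)  == 0x2 << 4        == 0x20
 *
 * so the sixteen page indexes 0x20..0x2f share one uao_swhash_elt.
 */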

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
	(&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
	    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
	((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
	(MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool uao_swhash_elt_pool __cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, which allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj;	/* has: lock, pgops, #pages, #refs */
	pgoff_t u_pages;		/* number of pages in entire object */
	int u_flags;			/* the flags (see uvm_aobj.h) */
	int *u_swslots;			/* array of offset->swapslot mappings */
	/*
	 * hashtable of offset->swapslot mappings
	 * (u_swhash is an array of bucket heads)
	 */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;			/* freelist to allocate pages from */
};

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list __cacheline_aligned;
static kmutex_t uao_list_lock __cacheline_aligned;
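
/*
 * Illustrative sketch (not code from this file): per the lock order
 * documented at the top, code that needs both the list lock and an
 * object's vmobjlock must take uao_list_lock first, e.g.
 *
 *	mutex_enter(&uao_list_lock);
 *	LIST_FOREACH(aobj, &uao_list, u_list) {
 *		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
 *		...
 *		rw_exit(aobj->u_obj.vmobjlock);
 *	}
 *	mutex_exit(&uao_list_lock);
 */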

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}
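
/*
 * Example (illustrative only): with the object locked, a caller can ask
 * whether a given page of an aobj currently lives in swap; a non-zero
 * return is the swap slot number:
 *
 *	slot = uao_find_swslot(uobj, pageidx);
 *	if (slot != 0)
 *		... the page's data is in swap slot "slot" ...
 */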

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}
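
/*
 * Example (illustrative only): with the object write-locked, recording a
 * swap slot for page 3 and later releasing it:
 *
 *	old = uao_set_swslot(uobj, 3, newslot);	// old slot (0), or -1 if
 *						// no memory for the elt
 *	...
 *	old = uao_set_swslot(uobj, 3, 0);	// frees the mapping and
 *						// returns "newslot"
 */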

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (" ")
 */

struct uvm_object *
uao_create(voff_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static krwlock_t bootstrap_kernel_object_lock;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless the kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
	if (flags == 0 || kernswap) {
#if defined(VMSWAP)

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, true, &aobj->u_swhashmask);
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    KM_SLEEP);
		}
#endif /* defined(VMSWAP) */

		/*
		 * Replace kernel_object's temporary static lock with
		 * a regular rw_obj.  We cannot use uvm_obj_setlock()
		 * because that would try to free the old lock.
		 */

		if (kernswap) {
			aobj->u_obj.vmobjlock = rw_obj_alloc();
			rw_destroy(&bootstrap_kernel_object_lock);
		}
		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Use a temporary static lock for kernel_object. */
		rw_init(&bootstrap_kernel_object_lock);
		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}
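
/*
 * Example (illustrative only) of the aobj reference-count lifecycle:
 *
 *	uobj = uao_create(npages << PAGE_SHIFT, 0);	// one reference
 *	uao_reference(uobj);				// now two
 *	uao_detach(uobj);
 *	uao_detach(uobj);	// last reference dropped: object freed
 */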

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
	membar_release();
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}
	membar_acquire();

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	uvm_page_array_init(&a, uobj, 0);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
		uvm_page_array_advance(&a);
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_page_array_fini(&a);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t curoff;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush %#jx > %#jx (fixed)\n",
			    (uintmax_t)stop,
			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}
	UVMHIST_LOG(maphist,
	    " flush start=%#jx, stop=%#jx, flags=%#jx",
	    start, stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		rw_exit(uobj->vmobjlock);
		return 0;
	}

	/* locked: uobj */
	uvm_page_array_init(&a, uobj, 0);
	curoff = start;
	while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
		if (pg->offset >= stop) {
			break;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uvm_page_array_advance(&a);
		curoff = pg->offset + PAGE_SIZE;

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	rw_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
	return 0;
}
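
/*
 * Example (illustrative only): generic UVM code reaches uao_put() through
 * the pager ops rather than by name; discarding every page of an aobj
 * would look like
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE);		// returns with uobj unlocked
 */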

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
 * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
	struct uvm_page_array a;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
	    (uintptr_t)uobj, offset, flags,0);

	/*
	 * the object must be locked.  it can only be a read lock when
	 * processing a read fault with PGO_LOCKED.
	 */

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	    ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.  only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		uvm_page_array_init(&a, uobj, 0);
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0; lcv < maxpages; lcv++) {
			ptmp = uvm_page_array_fill_and_peek(&a,
			    offset + (lcv << PAGE_SHIFT), maxpages);
			if (ptmp == NULL) {
				break;
			}
			KASSERT(ptmp->offset >= offset);
			lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
			if (lcv >= maxpages) {
				break;
			}
			uvm_page_array_advance(&a);

			/*
			 * to be useful must get a non-busy page
			 */

			if ((ptmp->flags & PG_BUSY) != 0) {
				continue;
			}

			/*
			 * useful page: plug it in our result array
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			pps[lcv] = ptmp;
			gotpages++;
		}
		uvm_page_array_fini(&a);

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)",
		    (pps[centeridx] != NULL), 0,0,0);
		*npagesp = gotpages;
		return pps[centeridx] != NULL ? 0 : EBUSY;
	}
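
	/*
	 * (Illustrative note: the fault handler typically tries the
	 * PGO_LOCKED fast path above first and, on EBUSY, retries with a
	 * blocking call roughly like
	 *
	 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pps, &npages,
	 *	    centeridx, access_type, advice, PGO_SYNCIO);
	 *
	 * which takes the steps below.)
	 */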

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.  data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	uvm_page_array_init(&a, uobj, 0);
	for (lcv = 0, current_offset = offset; lcv < maxpages;) {

		/*
		 * we have yet to locate the current page (pps[lcv]).  we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].  we are
		 * ready to move on to the next page.
		 */

		ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
		    maxpages - lcv);

		if (ptmp != NULL && ptmp->offset == current_offset) {
			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags %#jx\n",
				    ptmp->flags,0,0,0);
				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
				rw_enter(uobj->vmobjlock, RW_WRITER);
				uvm_page_array_clear(&a);
				continue;
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.  we busy it now (so we own it).  if
			 * overwriting, mark the page dirty up front as
			 * it will be zapped via an unmanaged mapping.
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			if (overwrite) {
				uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
			}
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv++] = ptmp;
			current_offset += PAGE_SIZE;
			uvm_page_array_advance(&a);
			continue;
		} else {
			KASSERT(ptmp == NULL || ptmp->offset > current_offset);
		}

		/*
		 * not resident.  allocate a new busy/fake/clean page in the
		 * object.  if it's in swap we need to do I/O to fill in the
		 * data, otherwise the page needs to be cleared: if it's not
		 * destined to be overwritten, then zero it here and now.
		 */

		pageidx = current_offset >> PAGE_SHIFT;
		swslot = uao_find_swslot(uobj, pageidx);
		ptmp = uao_pagealloc(uobj, current_offset,
		    swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);

		/* out of RAM? */
		if (ptmp == NULL) {
			rw_exit(uobj->vmobjlock);
			UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
			uvm_wait("uao_getpage");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			uvm_page_array_clear(&a);
			continue;
		}

		/*
		 * if swslot == 0, page hasn't existed before and is zeroed.
		 * otherwise we have a "fake/busy/clean" page that we just
		 * allocated.  do the needed "i/o", reading from swap.
		 */

		if (swslot != 0) {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			uvm_page_array_clear(&a);
			rw_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITER);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_pagefree(ptmp);
				rw_exit(uobj->vmobjlock);
				UVMHIST_LOG(pdhist, "<- done (error)",
				    error,lcv,0,0);
				if (lcv != 0) {
					uvm_page_unbusy(pps, lcv);
				}
				memset(pps, 0, maxpages * sizeof(pps[0]));
				uvm_page_array_fini(&a);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * note that we will allow the page being writably-mapped
		 * (!PG_RDONLY) regardless of access_type.  if overwrite,
		 * the page can be modified through an unmanaged mapping
		 * so mark it dirty up front.
		 */
		if (overwrite) {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
		} else {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
		}

		/*
		 * we got the page!  clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.  note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
		KASSERT((ptmp->flags & PG_FAKE) != 0);
		KASSERT(ptmp->offset == current_offset);
		ptmp->flags &= ~PG_FAKE;
		pps[lcv++] = ptmp;
		current_offset += PAGE_SIZE;
	}
	uvm_page_array_fini(&a);

	/*
	 * finally, unlock object and return.
	 */

done:
	rw_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap: release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}
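
/*
 * Example (illustrative only): uao_detach() above pairs uao_dropswap()
 * with freeing the resident page, so both copies of the data go away:
 *
 *	uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 *	uvm_pagefree(pg);
 */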

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
		rv = uao_pagein(aobj, startslot, endslot);
		rw_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page, then start over
					 * on this object, since the swhash
					 * elt may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}
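
/*
 * Example (illustrative only): when a swap device covering slots
 * [startslot, endslot) is being removed, the swap code can force the
 * affected aobj pages back into RAM with
 *
 *	if (uao_swap_off(startslot, endslot))
 *		... pagein ran out of memory; removal cannot proceed ...
 */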

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(rw_write_held(uobj->vmobjlock));
	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	rw_enter(uobj->vmobjlock, RW_WRITER);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	uvm_pagelock(pg);
	uvm_pageenqueue(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}

#endif /* defined(VMSWAP) */