Lines Matching defs:aobj

58 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
70 * Note: for hash tables, we break the address space of the aobj into blocks
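The "blocks" mentioned at line 70 are fixed-size clusters of page slots, one hash element per cluster. A minimal sketch of such an element, modeled on the file's uao_swhash_elt; the sketch_ names and the cluster size here are illustrative assumptions, not the kernel definitions:

        #include <sys/queue.h>

        #define SKETCH_CLUSTER_SHIFT 2                  /* assumed: 4-page blocks */
        #define SKETCH_CLUSTER_SIZE  (1 << SKETCH_CLUSTER_SHIFT)

        /* one element per block of pages; chained off a hash bucket */
        struct sketch_swhash_elt {
                LIST_ENTRY(sketch_swhash_elt) list;     /* hash-chain linkage */
                unsigned long tag;                      /* pageidx >> SKETCH_CLUSTER_SHIFT */
                int count;                              /* nonzero entries in slots[] */
                int slots[SKETCH_CLUSTER_SIZE];         /* one swap slot per page in block */
        };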
91 #define UAO_SWHASH_HASH(aobj, idx) \
92     (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
93     & (aobj)->u_swhashmask)])
100 #define UAO_USES_SWHASH(aobj) \
101     ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
105 #define UAO_SWHASH_BUCKETS(aobj) \
106     (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
137 * => only one of u_swslots and u_swhash is used in any given aobj
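The hash macro at lines 91-93 reduces a page index to a bucket in two steps: shift away the in-block offset, then mask into the table. A standalone demo of that arithmetic, using an assumed cluster shift and a 16-bucket table:

        #include <stdio.h>

        #define CLUSTER_SHIFT 2 /* assumption, standing in for UAO_SWHASH_CLUSTER_SHIFT */

        int
        main(void)
        {
                unsigned long swhashmask = 0xf; /* 16 buckets, kept in mask form as in the aobj */
                unsigned long idx;

                for (idx = 0; idx < 80; idx += 13) {
                        unsigned long bucket = (idx >> CLUSTER_SHIFT) & swhashmask;
                        printf("pageidx %2lu -> block %2lu -> bucket %2lu\n",
                            idx, idx >> CLUSTER_SHIFT, bucket);
                }
                return 0;
        }

Nearby page indices share a block and therefore a bucket, which is the point: one hash element covers a whole cluster of pages.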
204 uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
210 swhash = UAO_SWHASH_HASH(aobj, pageidx);
242 * uao_find_swslot: find the swap slot number for an aobj/pageidx
250 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
259 if (aobj->u_flags & UAO_FLAG_NOSWAP)
266 if (UAO_USES_SWHASH(aobj)) {
267 elt = uao_find_swhash_elt(aobj, pageidx, false);
275 return aobj->u_swslots[pageidx];
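Read together, lines 259-275 reduce to the sketch below; UAO_SWHASH_ELT_PAGESLOT is the file's accessor for a slot within an element (assuming its usual definition), and the UAO_FLAG_NOSWAP early return is folded in:

        /* Condensed sketch of uao_find_swslot; not the verbatim function. */
        static int
        sketch_find_swslot(struct uvm_aobj *aobj, int pageidx)
        {
                struct uao_swhash_elt *elt;

                if (aobj->u_flags & UAO_FLAG_NOSWAP)
                        return 0;                       /* no swap configured yet */
                if (UAO_USES_SWHASH(aobj)) {
                        elt = uao_find_swhash_elt(aobj, pageidx, false);
                        return elt != NULL ?
                            UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
                }
                return aobj->u_swslots[pageidx];
        }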
279 * uao_set_swslot: set the swap slot for a page in an aobj.
290 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
294 UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
295 (uintptr_t)aobj, pageidx, slot, 0);
304 if (aobj->u_flags & UAO_FLAG_NOSWAP) {
313 if (UAO_USES_SWHASH(aobj)) {
321 elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
348 oldslot = aobj->u_swslots[pageidx];
349 aobj->u_swslots[pageidx] = slot;
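For small objects, lines 348-349 are the whole story: the setter records the new slot and returns the displaced one so the caller can free it. That branch, sketched in isolation:

        /* Array branch of uao_set_swslot, as a sketch. */
        static int
        sketch_set_swslot_array(struct uvm_aobj *aobj, int pageidx, int slot)
        {
                int oldslot = aobj->u_swslots[pageidx];

                aobj->u_swslots[pageidx] = slot;
                return oldslot;         /* caller releases this if nonzero */
        }

The hash branch (line 321) does the same exchange inside an element, creating the element on demand only when the new slot is nonzero, as the `slot != 0` create argument shows.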
361 * uao_free: free all resources held by an aobj, and then free the aobj
363 * => the aobj should be dead
367 uao_free(struct uvm_aobj *aobj)
369 struct uvm_object *uobj = &aobj->u_obj;
377 if (UAO_USES_SWHASH(aobj)) {
383 hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
390 kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
395 * finally free the aobj itself
399 kmem_free(aobj, sizeof(struct uvm_aobj));
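Lines 377-399 pair each representation with its own teardown; combined, the shape is the following sketch (the page purge and lock teardown in the real function are omitted):

        /* Sketch of the representation-specific part of uao_free. */
        static void
        sketch_free_swinfo(struct uvm_aobj *aobj)
        {
                if (UAO_USES_SWHASH(aobj))
                        hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
                else
                        kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
                kmem_free(aobj, sizeof(struct uvm_aobj));       /* the aobj itself */
        }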
407 * uao_create: create an aobj of the given size and return its uvm_object.
422 struct uvm_aobj *aobj;
426 * Allocate a new aobj, unless kernel object is requested.
431 aobj = &kernel_object_store;
432 aobj->u_pages = pages;
433 aobj->u_flags = UAO_FLAG_NOSWAP;
438 aobj = &kernel_object_store;
442 aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
443 aobj->u_pages = pages;
444 aobj->u_flags = 0;
452 aobj->u_freelist = VM_NFREELIST;
466 if (UAO_USES_SWHASH(aobj)) {
467 aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
468 HASH_LIST, true, &aobj->u_swhashmask);
470 aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
482 aobj->u_obj.vmobjlock = rw_obj_alloc();
486 aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
487 return &aobj->u_obj;
496 uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
500 uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
504 * now that aobj is ready, add it to the global list
508 LIST_INSERT_HEAD(&uao_list, aobj, u_list);
510 return &aobj->u_obj;
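From a caller's point of view, all of the above reduces to one call. A hedged usage sketch (size is in bytes; flags of 0 request an ordinary aobj, while UAO_FLAG_KERNOBJ is reserved for the single kernel object):

        /* Hedged usage sketch, not from the file. */
        struct uvm_object *uobj;

        uobj = uao_create(16 * PAGE_SIZE, 0);   /* 16-page anonymous object */
        /* ... map it, fault pages in, do work ... */
        uao_detach(uobj);                       /* drop our reference */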
523 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
529 aobj->u_freelist = freelist;
533 * uao_pagealloc: allocate a page for aobj.
539 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
541 if (__predict_true(aobj->u_freelist == VM_NFREELIST))
545 UVM_PGA_STRAT_ONLY, aobj->u_freelist);
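Lines 541-545 branch on whether uao_set_pgfl (line 529) recorded a page-freelist restriction; filled out, the choice looks like this hedged reconstruction:

        /* Hedged reconstruction of uao_pagealloc's allocator choice. */
        if (__predict_true(aobj->u_freelist == VM_NFREELIST))
                return uvm_pagealloc(uobj, offset, NULL, flags);  /* no restriction */
        return uvm_pagealloc_strat(uobj, offset, NULL, flags,
            UVM_PGA_STRAT_ONLY, aobj->u_freelist);                /* restricted */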
549 * uao_init: set up aobj pager subsystem
587 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
615 * Remove the aobj from the global list.
619 LIST_REMOVE(aobj, u_list);
623 * Free all the pages left in the aobj. For each page, when the
639 uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
648 uao_free(aobj);
672 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
683 stop = aobj->u_pages << PAGE_SHIFT;
687 stop = aobj->u_pages << PAGE_SHIFT;
691 if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
695 (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
696 stop = aobj->u_pages << PAGE_SHIFT;
815 UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%#jx",
1006 * remove the swap slot from the aobj
1007 * and mark the aobj as having no real slot.
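In outline, the failure path described at lines 1006-1007 exchanges the slot for the SWSLOT_BAD sentinel and marks the old slot so it is never reused. A hedged sketch of the idea, with the surrounding context omitted:

        /* Sketch of the pagein-failure handling described above. */
        int slot;

        slot = uao_set_swslot(uobj, pageidx, SWSLOT_BAD);
        if (slot > 0)
                uvm_swap_markbad(slot, 1);      /* never hand this slot out again */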
1078 * uao_dropswap: release any swap resources from this aobj page.
1080 * => aobj must be locked or have a reference count of 0.
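uao_dropswap is small enough to sketch whole: detach the slot from the object, then return it to the swap allocator. This relies on the setter's old-slot return seen at line 348 (hedged sketch):

        /* Sketch of uao_dropswap's detach-then-free pairing. */
        void
        sketch_dropswap(struct uvm_object *uobj, int pageidx)
        {
                int slot = uao_set_swslot(uobj, pageidx, 0);

                if (slot != 0)
                        uvm_swap_free(slot, 1); /* give the slot back */
        }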
1097 * page in, for every aobj, every page that is paged out to a range of swslots.
1106 struct uvm_aobj *aobj;
1112 if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
1116 uao_reference(&aobj->u_obj);
1127 if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
1135 rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
1136 rv = uao_pagein(aobj, startslot, endslot);
1137 rw_exit(aobj->u_obj.vmobjlock);
1140 uao_detach(&aobj->u_obj);
1148 aobj = nextaobj;
1150 } while (aobj);
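The loop at lines 1112-1150 can drop the list lock safely because it pins its successor before working on the current object. The skeleton, hedged and with the locking elided:

        /* Skeleton of the keep-a-reference-ahead traversal. */
        aobj = LIST_FIRST(&uao_list);
        if (aobj != NULL)
                uao_reference(&aobj->u_obj);            /* pin the first object */
        while (aobj != NULL) {
                struct uvm_aobj *nextaobj = LIST_NEXT(aobj, u_list);

                if (nextaobj != NULL)
                        uao_reference(&nextaobj->u_obj); /* pin our successor */
                /* ... unlock the list, uao_pagein() this aobj, relock ... */
                uao_detach(&aobj->u_obj);               /* may free this aobj */
                aobj = nextaobj;
        }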
1157 * page in any pages from aobj in the given range.
1159 * => aobj must be locked and is returned locked.
1163 uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
1167 if (UAO_USES_SWHASH(aobj)) {
1172 for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
1173 for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
1197 rv = uao_pagein_page(aobj,
1209 for (i = 0; i < aobj->u_pages; i++) {
1210 int slot = aobj->u_swslots[i];
1224 rv = uao_pagein_page(aobj, i);
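Filled out, the array case at lines 1209-1224 is a linear scan that pages in exactly those indices whose slot falls in the target range (hedged reconstruction):

        /* Hedged reconstruction of uao_pagein's array branch. */
        for (i = 0; i < aobj->u_pages; i++) {
                int slot = aobj->u_swslots[i];

                if (slot < startslot || slot >= endslot)
                        continue;                       /* slot not being shut down */
                rv = uao_pagein_page(aobj, i);          /* may drop/retake the lock */
                if (rv)
                        return rv;
        }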
1242 uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
1244 struct uvm_object *uobj = &aobj->u_obj;
1283 uao_dropswap(&aobj->u_obj, pageidx);
1303 * => aobj must be locked and is returned locked.
1310 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
1320 if (UAO_USES_SWHASH(aobj)) {
1321 int i, hashbuckets = aobj->u_swhashmask + 1;
1331 for (elt = LIST_FIRST(&aobj->u_swhash[i]);
1360 KASSERT(uvm_pagelookup(&aobj->u_obj,
1381 if (aobj->u_pages < end) {
1382 end = aobj->u_pages;
1385 int slot = aobj->u_swslots[i];
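The array branch rounds out as below: clamp end to the object size (lines 1381-1382), then zero and free each live slot. A hedged sketch; the swap-only page accounting in the real function is omitted:

        /* Hedged sketch of the array branch of the range drop. */
        if (aobj->u_pages < end)
                end = aobj->u_pages;
        for (i = start; i < end; i++) {
                int slot = aobj->u_swslots[i];

                aobj->u_swslots[i] = 0;
                if (slot > 0)
                        uvm_swap_free(slot, 1);
        }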