--- sys/vm/vm_page.c	(r254138)
+++ sys/vm/vm_page.c	(r254141)
 /*-
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * The Mach Operating System project at Carnegie-Mellon University.
  *
--- 68 unchanged lines hidden ---
  *
  */
 
 /*
  * Resident memory management module.
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 254138 2013-08-09 11:11:11Z attilio $");
+__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 254141 2013-08-09 11:28:55Z attilio $");
 
 #include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/lock.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
--- 46 unchanged lines hidden ---
 
 static int pa_tryrelock_restart;
 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
     &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
 
 static uma_zone_t fakepg_zone;
 
 static struct vnode *vm_page_alloc_init(vm_page_t m);
+static void vm_page_cache_turn_free(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(int queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
-static void vm_page_insert_after(vm_page_t m, vm_object_t object,
+static int vm_page_insert_after(vm_page_t m, vm_object_t object,
     vm_pindex_t pindex, vm_page_t mpred);
+static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
+    vm_page_t mpred);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
 
 static void
 vm_page_init_fakepg(void *dummy)
 {
 
 	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
--- 764 unchanged lines hidden ---
 
 /*
  * vm_page_insert:		[ internal use only ]
  *
  *	Inserts the given mem entry into the object and object list.
  *
  *	The object must be locked.
  */
-void
+int
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t mpred;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	mpred = vm_radix_lookup_le(&object->rtree, pindex);
-	vm_page_insert_after(m, object, pindex, mpred);
+	return (vm_page_insert_after(m, object, pindex, mpred));
 }
 
 /*
  * vm_page_insert_after:
  *
  *	Inserts the page "m" into the specified object at offset "pindex".
  *
  *	The page "mpred" must immediately precede the offset "pindex" within
  *	the specified object.
  *
  *	The object must be locked.
  */
-static void
+static int
 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
     vm_page_t mpred)
 {
+	vm_pindex_t sidx;
+	vm_object_t sobj;
 	vm_page_t msucc;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(m->object == NULL,
 	    ("vm_page_insert_after: page already inserted"));
 	if (mpred != NULL) {
 		KASSERT(mpred->object == object ||
 		    (mpred->flags & PG_SLAB) != 0,
--- 5 unchanged lines hidden ---
 		msucc = TAILQ_FIRST(&object->memq);
 	if (msucc != NULL)
 		KASSERT(msucc->pindex > pindex,
 		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
 
 	/*
 	 * Record the object/offset pair in this page
 	 */
+	sobj = m->object;
+	sidx = m->pindex;
 	m->object = object;
 	m->pindex = pindex;
 
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
+	if (vm_radix_insert(&object->rtree, m)) {
+		m->object = sobj;
+		m->pindex = sidx;
+		return (1);
+	}
+	vm_page_insert_radixdone(m, object, mpred);
+	return (0);
+}
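
Note: with this hunk, vm_page_insert() and vm_page_insert_after() stop assuming that the radix-trie insertion always succeeds; they return 0 on success and 1 when vm_radix_insert() cannot allocate a trie node, after rolling the page's object/pindex fields back. A minimal caller sketch of the new contract follows; the helper is hypothetical, and only the unwind idiom (clearing m->object before freeing) is taken from this revision's own failure paths.

	/* Hypothetical caller: insert a page, unwinding on radix failure. */
	static int
	example_insert_page(vm_object_t obj, vm_page_t m, vm_pindex_t idx)
	{

		VM_OBJECT_ASSERT_WLOCKED(obj);
		if (vm_page_insert(m, obj, idx) != 0) {
			/* No trie node was available; give the page back. */
			m->object = NULL;
			vm_page_free(m);
			return (ENOMEM);
		}
		return (0);
	}
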
+
+/*
+ * vm_page_insert_radixdone:
+ *
+ *	Complete page "m" insertion into the specified object after the
+ *	radix trie hooking.
+ *
+ *	The page "mpred" must precede the offset "m->pindex" within the
+ *	specified object.
+ *
+ *	The object must be locked.
+ */
+static void
+vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
+{
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(object != NULL && m->object == object,
+	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
+	if (mpred != NULL) {
+		KASSERT(mpred->object == object ||
+		    (mpred->flags & PG_SLAB) != 0,
+		    ("vm_page_insert_after: object doesn't contain mpred"));
+		KASSERT(mpred->pindex < m->pindex,
+		    ("vm_page_insert_after: mpred doesn't precede pindex"));
+	}
+
 	if (mpred != NULL)
 		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
 	else
 		TAILQ_INSERT_HEAD(&object->memq, m, listq);
-	vm_radix_insert(&object->rtree, m);
 
 	/*
 	 * Show that the object has one more resident page.
 	 */
 	object->resident_page_count++;
 
 	/*
 	 * Hold the vnode until the last page is released.
--- 129 unchanged lines hidden ---
 	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
 	    prev->pindex != m->pindex - 1)
 		prev = NULL;
 	return (prev);
 }
 
 /*
+ * Uses the page mnew as a replacement for an existing page at index
+ * pindex which must be already present in the object.
+ */
+vm_page_t
+vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
+{
+	vm_page_t mold, mpred;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+
+	/*
+	 * This function mostly follows vm_page_insert() and
+	 * vm_page_remove() without the radix, object count and vnode
+	 * dance.  Double check such functions for more comments.
+	 */
+	mpred = vm_radix_lookup(&object->rtree, pindex);
+	KASSERT(mpred != NULL,
+	    ("vm_page_replace: replacing page not present with pindex"));
+	mpred = TAILQ_PREV(mpred, respgs, listq);
+	if (mpred != NULL)
+		KASSERT(mpred->pindex < pindex,
+		    ("vm_page_insert_after: mpred doesn't precede pindex"));
+
+	mnew->object = object;
+	mnew->pindex = pindex;
+	mold = vm_radix_replace(&object->rtree, mnew, pindex);
+
+	/* Detach the old page from the resident tailq. */
+	TAILQ_REMOVE(&object->memq, mold, listq);
+	vm_page_lock(mold);
+	if (mold->oflags & VPO_BUSY) {
+		mold->oflags &= ~VPO_BUSY;
+		vm_page_flash(mold);
+	}
+	mold->object = NULL;
+	vm_page_unlock(mold);
+
+	/* Insert the new page in the resident tailq. */
+	if (mpred != NULL)
+		TAILQ_INSERT_AFTER(&object->memq, mpred, mnew, listq);
+	else
+		TAILQ_INSERT_HEAD(&object->memq, mnew, listq);
+	if (pmap_page_is_write_mapped(mnew))
+		vm_object_set_writeable_dirty(object);
+	return (mold);
+}
+
+/*
  * vm_page_rename:
  *
  *	Move the given memory entry from its
  *	current object to the specified target object/offset.
  *
  *	Note: swap associated with the page must be invalidated by the move.  We
  *	      have to do this for several reasons: (1) we aren't freeing the
  *	      page, (2) we are dirtying the page, (3) the VM system is probably
  *	      moving the page from object A to B, and will then later move
  *	      the backing store from A to B and we can't have a conflict.
  *
  *	Note: we *always* dirty the page.  It is necessary both for the
  *	      fact that we moved it, and because we may be invalidating
  *	      swap.  If the page is on the cache, we have to deactivate it
  *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
  *	      on the cache.
  *
- *	The objects must be locked.  The page must be locked if it is managed.
+ *	The objects must be locked.
  */
-void
+int
 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 {
+	vm_page_t mpred;
+	vm_pindex_t opidx;
 
+	VM_OBJECT_ASSERT_WLOCKED(new_object);
+
+	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
+	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
+	    ("vm_page_rename: pindex already renamed"));
+
+	/*
+	 * Create a custom version of vm_page_insert() which does not depend
+	 * on mpred and can cheat on the implementation aspects of the
+	 * function.
+	 */
+	opidx = m->pindex;
+	m->pindex = new_pindex;
+	if (vm_radix_insert(&new_object->rtree, m)) {
+		m->pindex = opidx;
+		return (1);
+	}
+
+	/*
+	 * The operation cannot fail anymore.  The removal must happen before
+	 * the listq iterator is tainted.
+	 */
+	m->pindex = opidx;
+	vm_page_lock(m);
 	vm_page_remove(m);
-	vm_page_insert(m, new_object, new_pindex);
+
+	/* Return back to the new pindex to complete vm_page_insert(). */
+	m->pindex = new_pindex;
+	m->object = new_object;
+	vm_page_unlock(m);
+	vm_page_insert_radixdone(m, new_object, mpred);
 	vm_page_dirty(m);
+	return (0);
 }
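
vm_page_rename() likewise reports failure to its caller now instead of assuming the trie insertion works. Callers that migrate pages between objects are expected to unlock, wait for the VM system to reclaim memory, and retry; a sketch of that pattern follows (the helper is hypothetical and relocks only the destination object for brevity, while real callers hold both object locks):

	/* Hypothetical retry loop around the now-failable vm_page_rename(). */
	static void
	example_rename_retry(vm_page_t m, vm_object_t new_object, vm_pindex_t idx)
	{

		while (vm_page_rename(m, new_object, idx) != 0) {
			/* Out of radix-trie nodes: drop the lock and wait. */
			VM_OBJECT_WUNLOCK(new_object);
			VM_WAIT;
			VM_OBJECT_WLOCK(new_object);
		}
	}
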
 
 /*
  * Convert all of the given object's cached pages that have a
  * pindex within the given range into free pages.  If the value
  * zero is given for "end", then the range's upper bound is
  * infinity.  If the given object is backed by a vnode and it
  * transitions from having one or more cached pages to none, the
--- 9 unchanged lines hidden ---
 	if (__predict_false(vm_radix_is_empty(&object->cache))) {
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return;
 	}
 	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
 		if (end != 0 && m->pindex >= end)
 			break;
 		vm_radix_remove(&object->cache, m->pindex);
-		m->object = NULL;
-		m->valid = 0;
-		/* Clear PG_CACHED and set PG_FREE. */
-		m->flags ^= PG_CACHED | PG_FREE;
-		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
-		    ("vm_page_cache_free: page %p has inconsistent flags", m));
-		cnt.v_cache_count--;
-		vm_phys_freecnt_adj(m, 1);
+		vm_page_cache_turn_free(m);
 	}
 	empty = vm_radix_is_empty(&object->cache);
 	mtx_unlock(&vm_page_queue_free_mtx);
 	if (object->type == OBJT_VNODE && empty)
 		vdrop(object->handle);
 }
 
 /*
--- 63 unchanged lines hidden ---
 		 * cache to the new object's cache.
 		 */
 		if ((m->pindex - offidxstart) >= new_object->size)
 			break;
 		vm_radix_remove(&orig_object->cache, m->pindex);
 		/* Update the page's object and offset. */
 		m->object = new_object;
 		m->pindex -= offidxstart;
-		vm_radix_insert(&new_object->cache, m);
+		if (vm_radix_insert(&new_object->cache, m))
+			vm_page_cache_turn_free(m);
 	}
 	mtx_unlock(&vm_page_queue_free_mtx);
 }
 
 /*
  * Returns TRUE if a cached page is associated with the given object and
  * offset, and FALSE otherwise.
  *
--- 75 unchanged lines hidden ---
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
 	if (object != NULL) {
 		mpred = vm_radix_lookup_le(&object->rtree, pindex);
 		KASSERT(mpred == NULL || mpred->pindex != pindex,
 		    ("vm_page_alloc: pindex already allocated"));
 	}
-	mtx_lock(&vm_page_queue_free_mtx);
+
+	/*
+	 * The page allocation request can come from consumers which already
+	 * hold the free page queue mutex, like vm_page_insert() in
+	 * vm_page_cache().
+	 */
+	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
 	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
 	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
 	    cnt.v_free_count + cnt.v_cache_count > 0)) {
 		/*
 		 * Allocate from the free queue if the number of free pages
 		 * exceeds the minimum for the request class.
--- 108 unchanged lines hidden ---
 		 * page is inserted into the object.
 		 */
 		atomic_add_int(&cnt.v_wire_count, 1);
 		m->wire_count = 1;
 	}
 	m->act_count = 0;
 
 	if (object != NULL) {
+		if (vm_page_insert_after(m, object, pindex, mpred)) {
+			/* See the comment below about hold count. */
+			if (vp != NULL)
+				vdrop(vp);
+			pagedaemon_wakeup();
+			m->object = NULL;
+			vm_page_free(m);
+			return (NULL);
+		}
+
 		/* Ignore device objects; the pager sets "memattr" for them. */
 		if (object->memattr != VM_MEMATTR_DEFAULT &&
 		    (object->flags & OBJ_FICTITIOUS) == 0)
 			pmap_page_set_memattr(m, object->memattr);
-		vm_page_insert_after(m, object, pindex, mpred);
 	} else
 		m->pindex = pindex;
 
 	/*
 	 * The following call to vdrop() must come after the above call
 	 * to vm_page_insert() in case both affect the same object and
 	 * vnode.  Otherwise, the affected vnode's hold count could
 	 * temporarily become zero.
--- 50 unchanged lines hidden ---
  * This routine may not sleep.
  */
 vm_page_t
 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
 	struct vnode *drop;
-	vm_page_t deferred_vdrop_list, m, m_ret;
+	vm_page_t deferred_vdrop_list, m, m_tmp, m_ret;
 	u_int flags, oflags;
 	int req_class;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
 	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
 	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
 	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
--- 86 unchanged lines hidden ---
 			m->busy_lock = VPB_SINGLE_EXCLUSIVER;
 			if ((req & VM_ALLOC_SBUSY) != 0)
 				m->busy_lock = VPB_SHARERS_WORD(1);
 		}
 		if ((req & VM_ALLOC_WIRED) != 0)
 			m->wire_count = 1;
 		/* Unmanaged pages don't use "act_count". */
 		m->oflags = oflags;
+		if (object != NULL) {
+			if (vm_page_insert(m, object, pindex)) {
+				while (deferred_vdrop_list != NULL) {
+					vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
+					deferred_vdrop_list =
+					    deferred_vdrop_list->pageq.tqe_next;
+				}
+				if (vm_paging_needed())
+					pagedaemon_wakeup();
+				for (m = m_ret, m_tmp = m_ret;
+				    m < &m_ret[npages]; m++) {
+					if (m_tmp < m)
+						m_tmp++;
+					else
+						m->object = NULL;
+					vm_page_free(m);
+				}
+				return (NULL);
+			}
+		} else
+			m->pindex = pindex;
 		if (memattr != VM_MEMATTR_DEFAULT)
 			pmap_page_set_memattr(m, memattr);
-		if (object != NULL)
-			vm_page_insert(m, object, pindex);
-		else
-			m->pindex = pindex;
 		pindex++;
 	}
 	while (deferred_vdrop_list != NULL) {
 		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
 		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
 	}
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
--- 360 unchanged lines hidden ---
 	 */
 	if (vm_pages_needed && !vm_page_count_min()) {
 		vm_pages_needed = 0;
 		wakeup(&cnt.v_free_count);
 	}
 }
 
 /*
+ * Turn a cached page into a free page, by changing its attributes.
+ * Keep the statistics up-to-date.
+ *
+ * The free page queue must be locked.
+ */
+static void
+vm_page_cache_turn_free(vm_page_t m)
+{
+
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+	m->object = NULL;
+	m->valid = 0;
+	/* Clear PG_CACHED and set PG_FREE. */
+	m->flags ^= PG_CACHED | PG_FREE;
+	KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
+	    ("vm_page_cache_free: page %p has inconsistent flags", m));
+	cnt.v_cache_count--;
+	vm_phys_freecnt_adj(m, 1);
+}
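
The flag XOR above does two updates at once: a page entering vm_page_cache_turn_free() must have PG_CACHED set and PG_FREE clear, so flipping both bits with one XOR clears PG_CACHED and sets PG_FREE, and the KASSERT right after it catches any page that violated the precondition. Schematically (bit positions illustrative, not the real mask values):

	flags: PG_CACHED=1, PG_FREE=0
	flags ^= (PG_CACHED | PG_FREE)  =>  PG_CACHED=0, PG_FREE=1
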
+
+/*
  * vm_page_free_toq:
  *
  *	Returns the given page to the free list,
  *	disassociating it with any VM object.
  *
  *	The object must be locked.  The page must be locked if it is managed.
  */
 void
--- 285 unchanged lines hidden ---
 		 * default object or swap object but without a backing
 		 * store must be zero filled.
 		 */
 		vm_page_free(m);
 		return;
 	}
 	KASSERT((m->flags & PG_CACHED) == 0,
 	    ("vm_page_cache: page %p is already cached", m));
-	PCPU_INC(cnt.v_tcached);
 
 	/*
 	 * Remove the page from the paging queues.
 	 */
 	vm_page_remque(m);
 
 	/*
 	 * Remove the page from the object's collection of resident
--- 10 unchanged lines hidden ---
 	pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
 
 	/*
 	 * Insert the page into the object's collection of cached pages
 	 * and the physical memory allocator's cache/free page queues.
 	 */
 	m->flags &= ~PG_ZERO;
 	mtx_lock(&vm_page_queue_free_mtx);
+	cache_was_empty = vm_radix_is_empty(&object->cache);
+	if (vm_radix_insert(&object->cache, m)) {
+		mtx_unlock(&vm_page_queue_free_mtx);
+		if (object->resident_page_count == 0)
+			vdrop(object->handle);
+		m->object = NULL;
+		vm_page_free(m);
+		return;
+	}
 	m->flags |= PG_CACHED;
 	cnt.v_cache_count++;
-	cache_was_empty = vm_radix_is_empty(&object->cache);
-	vm_radix_insert(&object->cache, m);
+	PCPU_INC(cnt.v_tcached);
 #if VM_NRESERVLEVEL > 0
 	if (!vm_reserv_free_page(m)) {
 #else
 	if (TRUE) {
 #endif
 		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
 		vm_phys_free_pages(m, 0);
 	}
--- 556 unchanged lines hidden ---
 	object = m->object;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->paging_in_progress != 0,
 	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
 	    object));
 	pindex = m->pindex;
 
 retry_alloc:
-	pmap_remove_all(m);
-	vm_page_remove(m);
-	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
+	mnew = vm_page_alloc(NULL, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
 	if (mnew == NULL) {
-		vm_page_insert(m, object, pindex);
 		vm_page_unlock(m);
 		VM_OBJECT_WUNLOCK(object);
 		VM_WAIT;
 		VM_OBJECT_WLOCK(object);
 		if (m == vm_page_lookup(object, pindex)) {
 			vm_page_lock(m);
 			goto retry_alloc;
 		} else {
--- 9 unchanged lines hidden ---
 		 * check to see if we raced with an xmit complete when
 		 * waiting to allocate a page.  If so, put things back
 		 * the way they were
 		 */
 		vm_page_unlock(m);
 		vm_page_lock(mnew);
 		vm_page_free(mnew);
 		vm_page_unlock(mnew);
-		vm_page_insert(m, object, pindex);
 	} else {	/* clear COW & copy page */
+		pmap_remove_all(m);
+		mnew->object = object;
+		if (object->memattr != VM_MEMATTR_DEFAULT &&
+		    (object->flags & OBJ_FICTITIOUS) == 0)
+			pmap_page_set_memattr(mnew, object->memattr);
+		if (vm_page_replace(mnew, object, pindex) != m)
+			panic("vm_page_cowfault: invalid page replacement");
 		if (!so_zerocp_fullpage)
 			pmap_copy_page(m, mnew);
 		mnew->valid = VM_PAGE_BITS_ALL;
 		vm_page_dirty(mnew);
 		mnew->wire_count = m->wire_count - m->cow;
 		m->wire_count = m->cow;
 		vm_page_unlock(m);
 	}
--- 110 unchanged lines hidden ---
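
The vm_page_cowfault() hunk above also inverts the old order of operations: instead of removing the original page up front and having to re-insert it when vm_page_alloc() fails, it first allocates an unassociated replacement (VM_ALLOC_NOOBJ) and commits only once nothing can fail, via vm_page_replace(), panicking if the displaced page is not the expected one. A condensed sketch of that allocate-then-commit shape (hypothetical helper; COW bookkeeping and locking elided):

	/* Hypothetical allocate-then-commit replacement of a resident page. */
	static vm_page_t
	example_replace_page(vm_object_t object, vm_page_t m)
	{
		vm_page_t mnew;

		/* Allocate with no object linkage, so failure needs no undo. */
		mnew = vm_page_alloc(NULL, m->pindex,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
		if (mnew == NULL)
			return (NULL);	/* Caller can VM_WAIT and retry. */
		mnew->object = object;
		if (vm_page_replace(mnew, object, m->pindex) != m)
			panic("example_replace_page: unexpected page replaced");
		return (mnew);
	}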