Deleted Added
full compact
vm_page.c (50477) vm_page.c (51337)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 20 unchanged lines hidden (view full) ---

29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
37 * $FreeBSD: head/sys/vm/vm_page.c 50477 1999-08-28 01:08:13Z peter $
37 * $FreeBSD: head/sys/vm/vm_page.c 51337 1999-09-17 04:56:40Z dillon $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *

--- 564 unchanged lines hidden (view full) ---

610 struct vpgqueues *pq;
611 if (queue != PQ_NONE) {
612 m->queue = PQ_NONE;
613 pq = &vm_page_queues[queue];
614 TAILQ_REMOVE(pq->pl, m, pageq);
615 (*pq->cnt)--;
616 pq->lcnt--;
617 if ((queue - m->pc) == PQ_CACHE) {
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *

--- 564 unchanged lines hidden (view full) ---

610 struct vpgqueues *pq;
611 if (queue != PQ_NONE) {
612 m->queue = PQ_NONE;
613 pq = &vm_page_queues[queue];
614 TAILQ_REMOVE(pq->pl, m, pageq);
615 (*pq->cnt)--;
616 pq->lcnt--;
617 if ((queue - m->pc) == PQ_CACHE) {
618 if ((cnt.v_cache_count + cnt.v_free_count) <
619 (cnt.v_free_reserved + cnt.v_cache_min))
618 if (vm_paging_needed())
620 pagedaemon_wakeup();
621 }
622 }
623}
624
625#if PQ_L2_SIZE > 1
626
627/*

--- 238 unchanged lines hidden (view full) ---

866 */
867
868 vm_page_insert(m, object, pindex);
869
870 /*
871 * Don't wakeup too often - wakeup the pageout daemon when
872 * we would be nearly out of memory.
873 */
619 pagedaemon_wakeup();
620 }
621 }
622}
623
624#if PQ_L2_SIZE > 1
625
626/*

--- 238 unchanged lines hidden (view full) ---

865 */
866
867 vm_page_insert(m, object, pindex);
868
869 /*
870 * Don't wakeup too often - wakeup the pageout daemon when
871 * we would be nearly out of memory.
872 */
874 if (((cnt.v_free_count + cnt.v_cache_count) <
875 (cnt.v_free_reserved + cnt.v_cache_min)) ||
876 (cnt.v_free_count < cnt.v_pageout_free_min))
873 if (vm_paging_needed() || cnt.v_free_count < cnt.v_pageout_free_min)
877 pagedaemon_wakeup();
878
879 splx(s);
880
881 return (m);
882}
883
884/*

--- 101 unchanged lines hidden (view full) ---

986}
987
988#endif
989
990/*
991 * vm_page_activate:
992 *
993 * Put the specified page on the active list (if appropriate).
874 pagedaemon_wakeup();
875
876 splx(s);
877
878 return (m);
879}
880
881/*

--- 101 unchanged lines hidden (view full) ---

983}
984
985#endif
986
987/*
988 * vm_page_activate:
989 *
990 * Put the specified page on the active list (if appropriate).
991 * Ensure that act_count is at least ACT_INIT but do not otherwise
992 * mess with it.
994 *
995 * The page queues must be locked.
996 * This routine may not block.
997 */
998void
999vm_page_activate(m)
1000 register vm_page_t m;
1001{

--- 43 unchanged lines hidden (view full) ---

1045 wakeup(&vm_pageout_pages_needed);
1046 vm_pageout_pages_needed = 0;
1047 }
1048 /*
1049 * wakeup processes that are waiting on memory if we hit a
1050 * high water mark. And wakeup scheduler process if we have
1051 * lots of memory. this process will swapin processes.
1052 */
993 *
994 * The page queues must be locked.
995 * This routine may not block.
996 */
997void
998vm_page_activate(m)
999 register vm_page_t m;
1000{

--- 43 unchanged lines hidden (view full) ---

1044 wakeup(&vm_pageout_pages_needed);
1045 vm_pageout_pages_needed = 0;
1046 }
1047 /*
1048 * wakeup processes that are waiting on memory if we hit a
1049 * high water mark. And wakeup scheduler process if we have
1050 * lots of memory. this process will swapin processes.
1051 */
1053 if (vm_pages_needed &&
1054 ((cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)) {
1052 if (vm_pages_needed && vm_page_count_min()) {
1055 wakeup(&cnt.v_free_count);
1056 vm_pages_needed = 0;
1057 }
1058}
1059
1060/*
1061 * vm_page_free_toq:
1062 *

--- 193 unchanged lines hidden (view full) ---

1256 splx(s);
1257}
1258
1259
1260/*
1261 * Move the specified page to the inactive queue. If the page has
1262 * any associated swap, the swap is deallocated.
1263 *
1053 wakeup(&cnt.v_free_count);
1054 vm_pages_needed = 0;
1055 }
1056}
1057
1058/*
1059 * vm_page_free_toq:
1060 *

--- 193 unchanged lines hidden (view full) ---

1254 splx(s);
1255}
1256
1257
1258/*
1259 * Move the specified page to the inactive queue. If the page has
1260 * any associated swap, the swap is deallocated.
1261 *
1262 * Normally athead is 0 resulting in LRU operation. athead is set
1263 * to 1 if we want this page to be 'as if it were placed in the cache',
1264 * except without unmapping it from the process address space.
1265 *
1264 * This routine may not block.
1265 */
1266 * This routine may not block.
1267 */
1266void
1267vm_page_deactivate(m)
1268 register vm_page_t m;
1268static __inline void
1269_vm_page_deactivate(vm_page_t m, int athead)
1269{
1270 int s;
1271
1272 /*
1273 * Ignore if already inactive.
1274 */
1275 if (m->queue == PQ_INACTIVE)
1276 return;
1277
1278 s = splvm();
1279 if (m->wire_count == 0) {
1280 if ((m->queue - m->pc) == PQ_CACHE)
1281 cnt.v_reactivated++;
1282 vm_page_unqueue(m);
1270{
1271 int s;
1272
1273 /*
1274 * Ignore if already inactive.
1275 */
1276 if (m->queue == PQ_INACTIVE)
1277 return;
1278
1279 s = splvm();
1280 if (m->wire_count == 0) {
1281 if ((m->queue - m->pc) == PQ_CACHE)
1282 cnt.v_reactivated++;
1283 vm_page_unqueue(m);
1283 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
1284 if (athead)
1285 TAILQ_INSERT_HEAD(&vm_page_queue_inactive, m, pageq);
1286 else
1287 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
1284 m->queue = PQ_INACTIVE;
1285 vm_page_queues[PQ_INACTIVE].lcnt++;
1286 cnt.v_inactive_count++;
1287 }
1288 splx(s);
1289}
1290
1288 m->queue = PQ_INACTIVE;
1289 vm_page_queues[PQ_INACTIVE].lcnt++;
1290 cnt.v_inactive_count++;
1291 }
1292 splx(s);
1293}
1294
1295void
1296vm_page_deactivate(vm_page_t m)
1297{
1298 _vm_page_deactivate(m, 0);
1299}
1300
1291/*
1292 * vm_page_cache
1293 *
1294 * Put the specified page onto the page cache queue (if appropriate).
1295 *
1296 * This routine may not block.
1297 */
1298void

--- 29 unchanged lines hidden (view full) ---

1328 vm_page_queues[m->queue].lcnt++;
1329 TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
1330 cnt.v_cache_count++;
1331 vm_page_free_wakeup();
1332 splx(s);
1333}
1334
1335/*
1301/*
1302 * vm_page_cache
1303 *
1304 * Put the specified page onto the page cache queue (if appropriate).
1305 *
1306 * This routine may not block.
1307 */
1308void

--- 29 unchanged lines hidden (view full) ---

1338 vm_page_queues[m->queue].lcnt++;
1339 TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
1340 cnt.v_cache_count++;
1341 vm_page_free_wakeup();
1342 splx(s);
1343}
1344
/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */

1367void
1368vm_page_dontneed(m)
1369 vm_page_t m;
1370{
1371 static int dnweight;
1372 int dnw;
1373 int head;
1374
1375 dnw = ++dnweight;
1376
1377 /*
1378 * occassionally leave the page alone
1379 */
1380
1381 if ((dnw & 0x01F0) == 0 ||
1382 m->queue == PQ_INACTIVE ||
1383 m->queue - m->pc == PQ_CACHE
1384 ) {
1385 if (m->act_count >= ACT_INIT)
1386 --m->act_count;
1387 return;
1388 }
1389
1390 if (m->dirty == 0)
1391 vm_page_test_dirty(m);
1392
1393 if (m->dirty || (dnw & 0x0070) == 0) {
1394 /*
1395 * Deactivate the page 3 times out of 32.
1396 */
1397 head = 0;
1398 } else {
1399 /*
1400 * Cache the page 28 times out of every 32. Note that
1401 * the page is deactivated instead of cached, but placed
1402 * at the head of the queue instead of the tail.
1403 */
1404 head = 1;
1405 }
1406 _vm_page_deactivate(m, head);
1407}
1408
1409/*
1336 * Grab a page, waiting until we are waken up due to the page
1337 * changing state. We keep on waiting, if the page continues
1338 * to be in the object. If the page doesn't exist, allocate it.
1339 *
1340 * This routine may block.
1341 */
1342vm_page_t
1343vm_page_grab(object, pindex, allocflags)

--- 532 unchanged lines hidden ---
1410 * Grab a page, waiting until we are waken up due to the page
1411 * changing state. We keep on waiting, if the page continues
1412 * to be in the object. If the page doesn't exist, allocate it.
1413 *
1414 * This routine may block.
1415 */
1416vm_page_t
1417vm_page_grab(object, pindex, allocflags)

--- 532 unchanged lines hidden ---