vm_pageout.c (15203) vm_pageout.c (15809)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 51 unchanged lines hidden ---

60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 *
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 51 unchanged lines hidden ---

60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 *
68 * $Id: vm_pageout.c,v 1.69 1996/03/28 04:53:28 dyson Exp $
68 * $Id: vm_pageout.c,v 1.70 1996/04/11 21:05:25 bde Exp $
69 */
70
71/*
72 * The proverbial page-out daemon.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>

--- 56 unchanged lines hidden ---

133
134extern int npendingio;
135static int vm_pageout_req_swapout; /* XXX */
136static int vm_daemon_needed;
137extern int nswiodone;
138extern int vm_swap_size;
139extern int vfs_update_wakeup;
140
69 */
70
71/*
72 * The proverbial page-out daemon.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>

--- 56 unchanged lines hidden ---

133
134extern int npendingio;
135static int vm_pageout_req_swapout; /* XXX */
136static int vm_daemon_needed;
137extern int nswiodone;
138extern int vm_swap_size;
139extern int vfs_update_wakeup;
140
141#define MAXSCAN 1024 /* maximum number of pages to scan in queues */
142
143#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
144
145#define VM_PAGEOUT_PAGE_COUNT 16
146int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
147
148int vm_page_max_wired; /* XXX max # of wired pages system-wide */
149
150typedef int freeer_fcn_t __P((vm_map_t, vm_object_t, int, int));

--- 259 unchanged lines hidden ---

410 }
411 if (object->paging_in_progress)
412 return dcount;
413
414 /*
415 * scan the objects entire memory queue
416 */
417 rcount = object->resident_page_count;
141#define MAXLAUNDER (cnt.v_page_count > 1800 ? 32 : 16)
142
143#define VM_PAGEOUT_PAGE_COUNT 16
144int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
145
146int vm_page_max_wired; /* XXX max # of wired pages system-wide */
147
148typedef int freeer_fcn_t __P((vm_map_t, vm_object_t, int, int));

--- 259 unchanged lines hidden ---

408 }
409 if (object->paging_in_progress)
410 return dcount;
411
412 /*
413 * scan the objects entire memory queue
414 */
415 rcount = object->resident_page_count;
418 p = object->memq.tqh_first;
416 p = TAILQ_FIRST(&object->memq);
419 while (p && (rcount-- > 0)) {
417 while (p && (rcount-- > 0)) {
420 next = p->listq.tqe_next;
418 next = TAILQ_NEXT(p, listq);
421 cnt.v_pdpages++;
422 if (p->wire_count != 0 ||
423 p->hold_count != 0 ||
424 p->busy != 0 ||
425 (p->flags & PG_BUSY) ||
426 !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
427 p = next;
428 continue;
429 }
430 /*
431 * if a page is active, not wired and is in the processes
432 * pmap, then deactivate the page.
433 */
434 if (p->queue == PQ_ACTIVE) {
435 if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
436 (p->flags & PG_REFERENCED) == 0) {
419 cnt.v_pdpages++;
420 if (p->wire_count != 0 ||
421 p->hold_count != 0 ||
422 p->busy != 0 ||
423 (p->flags & PG_BUSY) ||
424 !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
425 p = next;
426 continue;
427 }
428 /*
429 * if a page is active, not wired and is in the processes
430 * pmap, then deactivate the page.
431 */
432 if (p->queue == PQ_ACTIVE) {
433 if (!pmap_is_referenced(VM_PAGE_TO_PHYS(p)) &&
434 (p->flags & PG_REFERENCED) == 0) {
437 p->act_count -= min(p->act_count, ACT_DECLINE);
435 vm_page_protect(p, VM_PROT_NONE);
436 if (!map_remove_only)
437 vm_page_deactivate(p);
438 /*
438 /*
439 * if the page act_count is zero -- then we
440 * deactivate
441 */
442 if (!p->act_count) {
443 if (!map_remove_only)
444 vm_page_deactivate(p);
445 vm_page_protect(p, VM_PROT_NONE);
446 /*
447 * else if on the next go-around we
448 * will deactivate the page we need to
449 * place the page on the end of the
450 * queue to age the other pages in
451 * memory.
452 */
453 } else {
454 TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
455 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
456 }
457 /*
458 * see if we are done yet
459 */
460 if (p->queue == PQ_INACTIVE) {
461 --count;
462 ++dcount;
463 if (count <= 0 &&
464 cnt.v_inactive_count > cnt.v_inactive_target) {
465 return dcount;
466 }
467 }
468 } else {
469 /*
470 * Move the page to the bottom of the queue.
471 */
472 pmap_clear_reference(VM_PAGE_TO_PHYS(p));
473 p->flags &= ~PG_REFERENCED;
439 * see if we are done yet
440 */
441 if (p->queue == PQ_INACTIVE) {
442 --count;
443 ++dcount;
444 if (count <= 0 &&
445 cnt.v_inactive_count > cnt.v_inactive_target) {
446 return dcount;
447 }
448 }
449 } else {
450 /*
451 * Move the page to the bottom of the queue.
452 */
453 pmap_clear_reference(VM_PAGE_TO_PHYS(p));
454 p->flags &= ~PG_REFERENCED;
474 if (p->act_count < ACT_MAX)
475 p->act_count += ACT_ADVANCE;
476
477 TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
478 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
479 }
480 } else if (p->queue == PQ_INACTIVE) {
481 vm_page_protect(p, VM_PROT_NONE);
482 }
483 p = next;

--- 55 unchanged lines hidden ---

539 int page_shortage, maxscan, maxlaunder, pcount;
540 int pages_freed;
541 vm_page_t next;
542 struct proc *p, *bigproc;
543 vm_offset_t size, bigsize;
544 vm_object_t object;
545 int force_wakeup = 0;
546 int vnodes_skipped = 0;
455
456 TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
457 TAILQ_INSERT_TAIL(&vm_page_queue_active, p, pageq);
458 }
459 } else if (p->queue == PQ_INACTIVE) {
460 vm_page_protect(p, VM_PROT_NONE);
461 }
462 p = next;

--- 55 unchanged lines hidden ---

518 int page_shortage, maxscan, maxlaunder, pcount;
519 int pages_freed;
520 vm_page_t next;
521 struct proc *p, *bigproc;
522 vm_offset_t size, bigsize;
523 vm_object_t object;
524 int force_wakeup = 0;
525 int vnodes_skipped = 0;
526 int usagefloor;
527 int i;
547
548 pages_freed = 0;
549
528
529 pages_freed = 0;
530
531
550 /*
551 * Start scanning the inactive queue for pages we can free. We keep
552 * scanning until we have enough free pages or we have scanned through
553 * the entire queue. If we encounter dirty pages, we start cleaning
554 * them.
555 */
556
557 maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
558 MAXLAUNDER : cnt.v_inactive_target;
559
560rescan1:
561 maxscan = cnt.v_inactive_count;
532 /*
533 * Start scanning the inactive queue for pages we can free. We keep
534 * scanning until we have enough free pages or we have scanned through
535 * the entire queue. If we encounter dirty pages, we start cleaning
536 * them.
537 */
538
539 maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
540 MAXLAUNDER : cnt.v_inactive_target;
541
542rescan1:
543 maxscan = cnt.v_inactive_count;
562 m = vm_page_queue_inactive.tqh_first;
544 m = TAILQ_FIRST(&vm_page_queue_inactive);
563 while ((m != NULL) && (maxscan-- > 0) &&
545 while ((m != NULL) && (maxscan-- > 0) &&
564 ((cnt.v_cache_count + cnt.v_free_count) < (cnt.v_cache_min + cnt.v_free_target))) {
546 ((cnt.v_cache_count + cnt.v_free_count) <
547 (cnt.v_cache_min + cnt.v_free_target))) {
565 vm_page_t next;
566
567 cnt.v_pdpages++;
548 vm_page_t next;
549
550 cnt.v_pdpages++;
568 next = m->pageq.tqe_next;
551 next = TAILQ_NEXT(m, pageq);
569
570#if defined(VM_DIAGNOSE)
571 if (m->queue != PQ_INACTIVE) {
572 printf("vm_pageout_scan: page not inactive?\n");
573 break;
574 }
575#endif
576
577 /*
552
553#if defined(VM_DIAGNOSE)
554 if (m->queue != PQ_INACTIVE) {
555 printf("vm_pageout_scan: page not inactive?\n");
556 break;
557 }
558#endif
559
560 /*
578 * dont mess with busy pages
561 * Dont mess with busy pages, keep in the front of the
562 * queue, most likely are being paged out.
579 */
580 if (m->busy || (m->flags & PG_BUSY)) {
581 m = next;
582 continue;
583 }
584 if (m->hold_count) {
585 TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
586 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);

--- 8 unchanged lines hidden ---

595 if (m->object->ref_count == 0) {
596 m->flags &= ~PG_REFERENCED;
597 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
598 }
599 if ((m->flags & PG_REFERENCED) != 0) {
600 m->flags &= ~PG_REFERENCED;
601 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
602 vm_page_activate(m);
563 */
564 if (m->busy || (m->flags & PG_BUSY)) {
565 m = next;
566 continue;
567 }
568 if (m->hold_count) {
569 TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
570 TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);

--- 8 unchanged lines hidden ---

579 if (m->object->ref_count == 0) {
580 m->flags &= ~PG_REFERENCED;
581 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
582 }
583 if ((m->flags & PG_REFERENCED) != 0) {
584 m->flags &= ~PG_REFERENCED;
585 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
586 vm_page_activate(m);
603 if (m->act_count < ACT_MAX)
604 m->act_count += ACT_ADVANCE;
605 m = next;
606 continue;
607 }
608
609 if (m->dirty == 0) {
610 vm_page_test_dirty(m);
611 } else if (m->dirty != 0) {
612 m->dirty = VM_PAGE_BITS_ALL;

--- 63 unchanged lines hidden ---

676 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
677 if (page_shortage <= 0) {
678 if (pages_freed == 0) {
679 page_shortage = cnt.v_free_min - cnt.v_free_count;
680 } else {
681 page_shortage = 1;
682 }
683 }
587 m = next;
588 continue;
589 }
590
591 if (m->dirty == 0) {
592 vm_page_test_dirty(m);
593 } else if (m->dirty != 0) {
594 m->dirty = VM_PAGE_BITS_ALL;

--- 63 unchanged lines hidden ---

658 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
659 if (page_shortage <= 0) {
660 if (pages_freed == 0) {
661 page_shortage = cnt.v_free_min - cnt.v_free_count;
662 } else {
663 page_shortage = 1;
664 }
665 }
684 maxscan = MAXSCAN;
666
685 pcount = cnt.v_active_count;
667 pcount = cnt.v_active_count;
686 m = vm_page_queue_active.tqh_first;
687 while ((m != NULL) && (maxscan > 0) &&
688 (pcount-- > 0) && (page_shortage > 0)) {
668 m = TAILQ_FIRST(&vm_page_queue_active);
669 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
670 next = TAILQ_NEXT(m, pageq);
689
671
690 cnt.v_pdpages++;
691 next = m->pageq.tqe_next;
692
693 /*
694 * Don't deactivate pages that are busy.
695 */
696 if ((m->busy != 0) ||
697 (m->flags & PG_BUSY) ||
698 (m->hold_count != 0)) {
699 TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
700 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
701 m = next;
702 continue;
703 }
672 /*
673 * Don't deactivate pages that are busy.
674 */
675 if ((m->busy != 0) ||
676 (m->flags & PG_BUSY) ||
677 (m->hold_count != 0)) {
678 TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
679 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
680 m = next;
681 continue;
682 }
704 if (m->object->ref_count &&
705 ((m->flags & PG_REFERENCED) ||
706 pmap_is_referenced(VM_PAGE_TO_PHYS(m))) ) {
683
684 /*
685 * The count for pagedaemon pages is done after checking the
686 * page for eligbility...
687 */
688 cnt.v_pdpages++;
689 if ((m->flags & PG_REFERENCED) == 0) {
690 if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
691 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
692 m->flags |= PG_REFERENCED;
693 }
694 } else {
707 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
695 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
696 }
697 if ( (m->object->ref_count != 0) &&
698 (m->flags & PG_REFERENCED) ) {
708 m->flags &= ~PG_REFERENCED;
699 m->flags &= ~PG_REFERENCED;
709 if (m->act_count < ACT_MAX) {
710 m->act_count += ACT_ADVANCE;
711 }
712 TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
713 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
714 } else {
715 m->flags &= ~PG_REFERENCED;
700 TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
701 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
702 } else {
703 m->flags &= ~PG_REFERENCED;
716 pmap_clear_reference(VM_PAGE_TO_PHYS(m));
717 m->act_count -= min(m->act_count, ACT_DECLINE);
718
719 /*
720 * if the page act_count is zero -- then we deactivate
721 */
722 if (!m->act_count && (page_shortage > 0)) {
723 if (m->object->ref_count == 0) {
724 --page_shortage;
725 vm_page_test_dirty(m);
726 if (m->dirty == 0) {
727 m->act_count = 0;
728 vm_page_cache(m);
729 } else {
730 vm_page_deactivate(m);
731 }
704 if (page_shortage > 0) {
705 --page_shortage;
706 vm_page_test_dirty(m);
707 if (m->dirty == 0) {
708 vm_page_cache(m);
732 } else {
733 vm_page_protect(m, VM_PROT_NONE);
734 vm_page_deactivate(m);
709 } else {
710 vm_page_protect(m, VM_PROT_NONE);
711 vm_page_deactivate(m);
735 --page_shortage;
736 }
712 }
737 } else if (m->act_count) {
738 TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
739 TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
740 }
741 }
713 }
714 }
742 maxscan--;
743 m = next;
744 }
715 m = next;
716 }
745
717
746 /*
747 * We try to maintain some *really* free pages, this allows interrupt
748 * code to be guaranteed space.
749 */
750 while (cnt.v_free_count < cnt.v_free_reserved) {
718 /*
719 * We try to maintain some *really* free pages, this allows interrupt
720 * code to be guaranteed space.
721 */
722 while (cnt.v_free_count < cnt.v_free_reserved) {
751 m = vm_page_queue_cache.tqh_first;
723 m = TAILQ_FIRST(&vm_page_queue_cache);
752 if (!m)
753 break;
754 vm_page_free(m);
755 cnt.v_dfree++;
756 }
757
758 /*
759 * If we didn't get enough free pages, and we have skipped a vnode

--- 5 unchanged lines hidden ---

765 if (vnodes_skipped &&
766 (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
767 if (!vfs_update_wakeup) {
768 vfs_update_wakeup = 1;
769 wakeup(&vfs_update_wakeup);
770 }
771 }
772#ifndef NO_SWAPPING
724 if (!m)
725 break;
726 vm_page_free(m);
727 cnt.v_dfree++;
728 }
729
730 /*
731 * If we didn't get enough free pages, and we have skipped a vnode

--- 5 unchanged lines hidden ---

737 if (vnodes_skipped &&
738 (cnt.v_cache_count + cnt.v_free_count) < cnt.v_free_min) {
739 if (!vfs_update_wakeup) {
740 vfs_update_wakeup = 1;
741 wakeup(&vfs_update_wakeup);
742 }
743 }
744#ifndef NO_SWAPPING
773 /*
774 * now swap processes out if we are in low memory conditions
775 */
776 if (!swap_pager_full && vm_swap_size &&
777 vm_pageout_req_swapout == 0) {
778 vm_pageout_req_swapout = 1;
745 if (cnt.v_free_count + cnt.v_cache_count < cnt.v_free_target) {
779 vm_req_vmdaemon();
746 vm_req_vmdaemon();
747 vm_pageout_req_swapout = 1;
780 }
781#endif
782 }
783
748 }
749#endif
750 }
751
784#ifndef NO_SWAPPING
785 if ((cnt.v_inactive_count + cnt.v_free_count + cnt.v_cache_count) <
786 (cnt.v_inactive_target + cnt.v_free_min)) {
787 vm_req_vmdaemon();
788 }
789#endif
790
791 /*
792 * make sure that we have swap space -- if we are low on memory and
793 * swap -- then kill the biggest process.
794 */
795 if ((vm_swap_size == 0 || swap_pager_full) &&
796 ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
797 bigproc = NULL;

--- 80 unchanged lines hidden ---

878 vm_page_max_wired = cnt.v_free_count / 3;
879
880
881 swap_pager_swap_init();
882 /*
883 * The pageout daemon is never done, so loop forever.
884 */
885 while (TRUE) {
752
753 /*
754 * make sure that we have swap space -- if we are low on memory and
755 * swap -- then kill the biggest process.
756 */
757 if ((vm_swap_size == 0 || swap_pager_full) &&
758 ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min)) {
759 bigproc = NULL;

--- 80 unchanged lines hidden ---

840 vm_page_max_wired = cnt.v_free_count / 3;
841
842
843 swap_pager_swap_init();
844 /*
845 * The pageout daemon is never done, so loop forever.
846 */
847 while (TRUE) {
886 int s = splhigh();
887
848 int s = splvm();
888 if (!vm_pages_needed ||
889 ((cnt.v_free_count >= cnt.v_free_reserved) &&
890 (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
891 vm_pages_needed = 0;
892 tsleep(&vm_pages_needed, PVM, "psleep", 0);
849 if (!vm_pages_needed ||
850 ((cnt.v_free_count >= cnt.v_free_reserved) &&
851 (cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min))) {
852 vm_pages_needed = 0;
853 tsleep(&vm_pages_needed, PVM, "psleep", 0);
854 } else if (!vm_pages_needed) {
855 tsleep(&vm_pages_needed, PVM, "psleep", hz/3);
893 }
856 }
857 if (vm_pages_needed)
858 cnt.v_pdwakeups++;
894 vm_pages_needed = 0;
895 splx(s);
859 vm_pages_needed = 0;
860 splx(s);
896 cnt.v_pdwakeups++;
897 vm_pager_sync();
898 vm_pageout_scan();
899 vm_pager_sync();
900 wakeup(&cnt.v_free_count);
861 vm_pager_sync();
862 vm_pageout_scan();
863 vm_pager_sync();
864 wakeup(&cnt.v_free_count);
901 wakeup(kmem_map);
902 }
903}
904
905#ifndef NO_SWAPPING
906static void
907vm_req_vmdaemon()
908{
909 static int lastrun = 0;
910
865 }
866}
867
868#ifndef NO_SWAPPING
869static void
870vm_req_vmdaemon()
871{
872 static int lastrun = 0;
873
911 if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
874 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
912 wakeup(&vm_daemon_needed);
913 lastrun = ticks;
914 }
915}
916
917static void
918vm_daemon()
919{

--- 53 unchanged lines hidden ---

973 (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
974 }
975 }
976
977 /*
978 * we remove cached objects that have no RSS...
979 */
980restart:
875 wakeup(&vm_daemon_needed);
876 lastrun = ticks;
877 }
878}
879
880static void
881vm_daemon()
882{

--- 53 unchanged lines hidden ---

936 (vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
937 }
938 }
939
940 /*
941 * we remove cached objects that have no RSS...
942 */
943restart:
981 object = vm_object_cached_list.tqh_first;
944 object = TAILQ_FIRST(&vm_object_cached_list);
982 while (object) {
983 /*
984 * if there are no resident pages -- get rid of the object
985 */
986 if (object->resident_page_count == 0) {
987 vm_object_reference(object);
988 pager_cache(object, FALSE);
989 goto restart;
990 }
945 while (object) {
946 /*
947 * if there are no resident pages -- get rid of the object
948 */
949 if (object->resident_page_count == 0) {
950 vm_object_reference(object);
951 pager_cache(object, FALSE);
952 goto restart;
953 }
991 object = object->cached_list.tqe_next;
954 object = TAILQ_NEXT(object, cached_list);
992 }
993 }
994}
995#endif /* !NO_SWAPPING */
955 }
956 }
957}
958#endif /* !NO_SWAPPING */
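
The most mechanical change in this revision is the move away from touching the queue fields directly (memq.tqh_first, listq.tqe_next, pageq.tqe_next) toward the <sys/queue.h> accessor macros TAILQ_FIRST() and TAILQ_NEXT(). The sketch below is a minimal user-space illustration of that traversal idiom: save the successor before the current element can be removed or requeued, and bound the pass by the element count, the way the scan loops above bound themselves with pcount/maxscan. The names here (page_stub, pglist, the referenced flag) are hypothetical stand-ins, not kernel code; only the standard sys/queue.h macros are assumed.

/*
 * Minimal sketch (user-space, not kernel code) of the
 * TAILQ_FIRST()/TAILQ_NEXT() scan idiom used above.  The types and
 * field names are hypothetical stand-ins for struct vm_page.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct page_stub {
	int id;
	int referenced;			/* stand-in for the reference bit */
	TAILQ_ENTRY(page_stub) pageq;	/* queue linkage */
};

TAILQ_HEAD(pglist, page_stub);

int
main(void)
{
	struct pglist active;
	struct page_stub *p, *next;
	int i, pcount;

	TAILQ_INIT(&active);
	for (i = 0; i < 5; i++) {
		p = malloc(sizeof(*p));
		if (p == NULL)
			return (1);
		p->id = i;
		p->referenced = (i & 1);
		TAILQ_INSERT_TAIL(&active, p, pageq);
	}

	/*
	 * Scan the queue.  "next" is captured before "p" is removed or
	 * re-inserted, so the walk cannot be broken, and the pass is
	 * bounded by the initial element count so entries moved to the
	 * tail are not revisited -- the same roles TAILQ_NEXT(m, pageq)
	 * and pcount play in the pageout scan.
	 */
	pcount = 5;
	p = TAILQ_FIRST(&active);
	while (p != NULL && pcount-- > 0) {
		next = TAILQ_NEXT(p, pageq);
		if (p->referenced) {
			/* referenced: clear the bit and age it to the tail */
			p->referenced = 0;
			TAILQ_REMOVE(&active, p, pageq);
			TAILQ_INSERT_TAIL(&active, p, pageq);
		} else {
			/* unreferenced: drop it from the queue */
			TAILQ_REMOVE(&active, p, pageq);
			free(p);
		}
		p = next;
	}

	TAILQ_FOREACH(p, &active, pageq)
		printf("still queued: %d\n", p->id);
	return (0);
}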