vm_pageout.c: revision 69972 (deleted lines) vs. revision 70374 (added lines)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *

--- 51 unchanged lines hidden ---

60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 *
68 * $FreeBSD: head/sys/vm/vm_pageout.c 69972 2000-12-13 10:01:00Z tanimura $
68 * $FreeBSD: head/sys/vm/vm_pageout.c 70374 2000-12-26 19:41:38Z dillon $
69 */
70
71/*
72 * The proverbial page-out daemon.
73 */
74
75#include "opt_vm.h"
76#include <sys/param.h>

--- 24 unchanged lines hidden ---

101
102/*
103 * System initialization
104 */
105
106/* the kernel process "vm_pageout"*/
107static void vm_pageout __P((void));
108static int vm_pageout_clean __P((vm_page_t));
109static int vm_pageout_scan __P((void));
109static void vm_pageout_scan __P((int pass));
110static int vm_pageout_free_page_calc __P((vm_size_t count));
111struct proc *pageproc;
112
113static struct kproc_desc page_kp = {
114 "pagedaemon",
115 vm_pageout,
116 &pageproc
117};

--- 17 unchanged lines hidden ---

135int vm_pageout_deficit=0; /* Estimated number of pages deficit */
136int vm_pageout_pages_needed=0; /* flag saying that the pageout daemon needs pages */
137
138#if !defined(NO_SWAPPING)
139static int vm_pageout_req_swapout; /* XXX */
140static int vm_daemon_needed;
141#endif
142extern int vm_swap_size;
143static int vm_max_launder = 32;
143static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
144static int vm_pageout_full_stats_interval = 0;
145static int vm_pageout_stats_free_max=0, vm_pageout_algorithm_lru=0;
146static int vm_pageout_stats_free_max=0, vm_pageout_algorithm=0;
146static int defer_swap_pageouts=0;
147static int disable_swap_pageouts=0;
148
149static int max_page_launder=100;
150static int vm_pageout_actcmp=0;
151#if defined(NO_SWAPPING)
152static int vm_swap_enabled=0;
153static int vm_swap_idle_enabled=0;
154#else
155static int vm_swap_enabled=1;
156static int vm_swap_idle_enabled=0;
157#endif
158
159SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
160 CTLFLAG_RW, &vm_pageout_algorithm_lru, 0, "LRU page mgmt");
159 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
161
161SYSCTL_INT(_vm, OID_AUTO, max_launder,
162 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
163
162SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
163 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
164
165SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
166 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
167
168SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
169 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");

--- 14 unchanged lines hidden ---

184#endif
185
186SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
187 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
188
189SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
190 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
191
192SYSCTL_INT(_vm, OID_AUTO, max_page_launder,
193 CTLFLAG_RW, &max_page_launder, 0, "Maximum number of pages to clean per pass");
194SYSCTL_INT(_vm, OID_AUTO, vm_pageout_actcmp,
195 CTLFLAG_RD, &vm_pageout_actcmp, 0, "pagedaemon aggressiveness");
196
197
198#define VM_PAGEOUT_PAGE_COUNT 16
199int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
200
201int vm_page_max_wired; /* XXX max # of wired pages system-wide */
202
203#if !defined(NO_SWAPPING)
204typedef void freeer_fcn_t __P((vm_map_t, vm_object_t, vm_pindex_t, int));
205static void vm_pageout_map_deactivate_pages __P((vm_map_t, vm_pindex_t));

--- 298 unchanged lines hidden ---

504 if ((p->queue != PQ_ACTIVE) &&
505 (p->flags & PG_REFERENCED)) {
506 vm_page_activate(p);
507 p->act_count += actcount;
508 vm_page_flag_clear(p, PG_REFERENCED);
509 } else if (p->queue == PQ_ACTIVE) {
510 if ((p->flags & PG_REFERENCED) == 0) {
511 p->act_count -= min(p->act_count, ACT_DECLINE);
512 if (!remove_mode && (vm_pageout_algorithm_lru || (p->act_count == 0))) {
508 if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
513 vm_page_protect(p, VM_PROT_NONE);
514 vm_page_deactivate(p);
515 } else {
516 s = splvm();
517 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
518 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, p, pageq);
519 splx(s);
520 }

--- 101 unchanged lines hidden ---

622 vm_page_free(m);
623 if (type == OBJT_SWAP || type == OBJT_DEFAULT)
624 vm_object_deallocate(object);
625}
626
627/*
628 * vm_pageout_scan does the dirty work for the pageout daemon.
629 */
630static int
631vm_pageout_scan()
626static void
627vm_pageout_scan(int pass)
632{
633 vm_page_t m, next;
634 struct vm_page marker;
631 int save_page_shortage;
632 int save_inactive_count;
635 int page_shortage, maxscan, pcount;
636 int addl_page_shortage, addl_page_shortage_init;
637 int maxlaunder;
638 struct proc *p, *bigproc;
639 vm_offset_t size, bigsize;
640 vm_object_t object;
641 int force_wakeup = 0;
642 int actcount;
643 int vnodes_skipped = 0;
640 int maxlaunder;
644 int s;
645
646 /*
647 * Do whatever cleanup the pmap code can.
648 */
649 pmap_collect();
650
651 addl_page_shortage_init = vm_pageout_deficit;
652 vm_pageout_deficit = 0;
653
654 if (max_page_launder == 0)
655 max_page_launder = 1;
656
657 /*
658 * Calculate the number of pages we want to either free or move
659 * to the cache. Be more aggressive if we aren't making our target.
653 * to the cache.
660 */
655 page_shortage = vm_paging_target() + addl_page_shortage_init;
656 save_page_shortage = page_shortage;
657 save_inactive_count = cnt.v_inactive_count;
661
662 page_shortage = vm_paging_target() +
663 addl_page_shortage_init + vm_pageout_actcmp;
664
665 /*
666 * Figure out how aggressively we should flush dirty pages.
667 */
668 {
669 int factor = vm_pageout_actcmp;
670
671 maxlaunder = cnt.v_inactive_target / 3 + factor;
672 if (maxlaunder > max_page_launder + factor)
673 maxlaunder = max_page_launder + factor;
674 }
675
676 /*
677 * Initialize our marker
678 */
679 bzero(&marker, sizeof(marker));
680 marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
681 marker.queue = PQ_INACTIVE;
682 marker.wire_count = 1;
683
684 /*
685 * Start scanning the inactive queue for pages we can move to the
686 * cache or free. The scan will stop when the target is reached or
687 * we have scanned the entire inactive queue. Note that m->act_count
688 * is not used to form decisions for the inactive queue, only for the
689 * active queue.
673 *
674 * maxlaunder limits the number of dirty pages we flush per scan.
675 * For most systems a smaller value (16 or 32) is more robust under
676 * extreme memory and disk pressure because any unnecessary writes
677 * to disk can result in extreme performance degradation. However,
678 * systems with excessive dirty pages (especially when MAP_NOSYNC is
679 * used) will die horribly with limited laundering. If the pageout
680 * daemon cannot clean enough pages in the first pass, we let it go
681 * all out in succeeding passes.
690 */
691
684 if ((maxlaunder = vm_max_launder) <= 1)
685 maxlaunder = 1;
686 if (pass)
687 maxlaunder = 10000;
688
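Distilling the policy above: the first scan of a shortage is throttled by the vm_max_launder sysctl, and any repeat scan that still misses its target launders without a practical limit. A minimal sketch of that selection, wrapped in a hypothetical helper that does not exist in vm_pageout.c:

/*
 * Hypothetical helper distilling the launder-limit selection above;
 * pass 0 is the throttled scan, any later pass is effectively unbounded.
 */
static int
select_launder_limit(int pass)
{
	int maxlaunder;

	if ((maxlaunder = vm_max_launder) <= 1)
		maxlaunder = 1;		/* never let the limit reach zero */
	if (pass)
		maxlaunder = 10000;	/* a prior pass fell short: go all out */
	return (maxlaunder);
}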
692rescan0:
693 addl_page_shortage = addl_page_shortage_init;
694 maxscan = cnt.v_inactive_count;
695 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
696 m != NULL && maxscan-- > 0 && page_shortage > 0;
697 m = next) {
698
699 cnt.v_pdpages++;

--- 87 unchanged lines hidden ---

787
788 /*
789 * Clean pages can be placed onto the cache queue. This
790 * effectively frees them.
791 */
792 } else if (m->dirty == 0) {
793 vm_page_cache(m);
794 --page_shortage;
795
796 /*
797 * Dirty pages need to be paged out. Note that we clean
798 * only a limited number of pages per pagedaemon pass.
799 */
792 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
793 /*
794 * Dirty pages need to be paged out, but flushing
795 * a page is extremely expensive versus freeing
796 * a clean page. Rather than artificially limiting
797 * the number of pages we can flush, we instead give
798 * dirty pages extra priority on the inactive queue
799 * by forcing them to be cycled through the queue
800 * twice before being flushed, after which the
801 * (now clean) page will cycle through once more
802 * before being freed. This significantly extends
803 * the thrash point for a heavily loaded machine.
804 */
805 s = splvm();
806 vm_page_flag_set(m, PG_WINATCFLS);
807 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
808 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
809 splx(s);
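In effect PG_WINATCFLS implements a two-strike rule: a dirty page's first trip through the inactive scan only marks and requeues it, and the page is laundered only once it comes around dirty a second time (or immediately, on the daemon's later passes). A distilled sketch using a hypothetical predicate, not code from this file:

/*
 * Hypothetical predicate distilling the two-strike rule above; returns
 * non-zero when a dirty inactive page may be laundered.
 */
static int
dirty_page_may_launder(vm_page_t m, int pass)
{
	if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
		vm_page_flag_set(m, PG_WINATCFLS);	/* strike one */
		return (0);	/* caller requeues m at the queue tail */
	}
	return (1);		/* strike two, or an all-out pass */
}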
800 } else if (maxlaunder > 0) {
811 /*
812 * We always want to try to flush some dirty pages if
813 * we encounter them, to keep the system stable.
814 * Normally this number is small, but under extreme
815 * pressure where there are insufficient clean pages
816 * on the inactive queue, we may have to go all out.
817 */
801 int swap_pageouts_ok;
802 struct vnode *vp = NULL;
803 struct mount *mp;
804
805 object = m->object;
806
807 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
808 swap_pageouts_ok = 1;

--- 12 unchanged lines hidden ---

821 s = splvm();
822 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
823 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
824 splx(s);
825 continue;
826 }
827
828 /*
829 * Presumably we have sufficient free memory to do
830 * the more sophisticated checks and locking required
831 * for vnodes.
846 * The object is already known NOT to be dead. It
847 * is possible for the vget() to block the whole
848 * pageout daemon, but the new low-memory handling
849 * code should prevent it.
832 *
850 *
833 * The object is already known NOT to be dead. The
834 * vget() may still block, though, because
835 * VOP_ISLOCKED() doesn't check to see if an inode
836 * (v_data) is associated with the vnode. If it isn't,
837 * vget() will load it in from disk. Worse, vget()
838 * may actually get stuck waiting on "inode" if another
839 * process is in the process of bringing the inode in.
840 * This is bad news for us either way.
851 * The previous code skipped locked vnodes and, worse,
852 * reordered pages in the queue. This results in
853 * completely non-deterministic operation and, on a
854 * busy system, can lead to extremely non-optimal
855 * pageouts. For example, it can cause clean pages
856 * to be freed and dirty pages to be moved to the end
857 * of the queue. Since dirty pages are also moved to
858 * the end of the queue once-cleaned, this gives
859 * way too large a weighting to deferring the freeing
860 * of dirty pages.
841 *
861 *
842 * So for the moment we check v_data == NULL as a
843 * workaround. This means that vnodes which do not
844 * use v_data in the way we expect probably will not
845 * wind up being paged out by the pager and it will be
846 * up to the syncer to get them. That's better than
847 * us blocking here.
848 *
849 * This whole code section is bogus - we need to fix
850 * the vnode pager to handle vm_page_t's without us
851 * having to do any sophisticated VOP tests.
862 * XXX we need to be able to apply a timeout to the
863 * vget() lock attempt.
852 */
853
854 if (object->type == OBJT_VNODE) {
855 vp = object->handle;
856
857 mp = NULL;
858 if (vp->v_type == VREG)
859 vn_start_write(vp, &mp, V_NOWAIT);
860 if (VOP_ISLOCKED(vp, NULL) ||
861 vp->v_data == NULL ||
862 vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
872 if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {
863 vn_finished_write(mp);
864 if ((m->queue == PQ_INACTIVE) &&
865 (m->hold_count == 0) &&
866 (m->busy == 0) &&
867 (m->flags & PG_BUSY) == 0) {
868 s = splvm();
869 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
870 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
871 splx(s);
872 }
873 if (object->flags & OBJ_MIGHTBEDIRTY)
874 vnodes_skipped++;
875 continue;
876 }
877
878 /*
879 * The page might have been moved to another
880 * queue during potential blocking in vget()

--- 38 unchanged lines hidden ---

919 continue;
920 }
921 }
922
923 /*
924 * If a page is dirty, then it is either being washed
925 * (but not yet cleaned) or it is still in the
926 * laundry. If it is still in the laundry, then we
927 * start the cleaning operation. maxlaunder nominally
928 * counts I/O cost (seeks) rather than bytes.
928 * start the cleaning operation.
929 *
930 * This operation may cluster, invalidating the 'next'
931 * pointer. To prevent an inordinate number of
932 * restarts we use our marker to remember our place.
933 *
934 * decrement page_shortage on success to account for
935 * the (future) cleaned page. Otherwise we could wind
936 * up laundering or cleaning too many pages.
933 */
934 s = splvm();
935 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
936 splx(s);
937 if (vm_pageout_clean(m) != 0)
941 if (vm_pageout_clean(m) != 0) {
942 --page_shortage;
938 --maxlaunder;
943 --maxlaunder;
944 }
939 s = splvm();
940 next = TAILQ_NEXT(&marker, pageq);
941 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
942 splx(s);
943 if (vp) {
944 vput(vp);
945 vn_finished_write(mp);
946 }
947 }
948 }
949
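The marker above is the standard pattern for walking a queue across an operation that can block and reorder neighbours: park a fictitious page behind the current one, do the work, then resume from the marker. A sketch of the pattern, factored into a hypothetical wrapper (the helper itself does not exist in this file):

/*
 * Hypothetical wrapper showing the marker pattern used above: the
 * fictitious marker page holds our place while vm_pageout_clean()
 * potentially blocks, clusters, and unlinks pages around 'm'.
 */
static vm_page_t
clean_with_marker(vm_page_t m, vm_page_t marker)
{
	vm_page_t next;
	int s;

	s = splvm();
	TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, marker, pageq);
	splx(s);
	(void) vm_pageout_clean(m);		/* may invalidate m's neighbours */
	s = splvm();
	next = TAILQ_NEXT(marker, pageq);	/* resume from our placeholder */
	TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, marker, pageq);
	splx(s);
	return (next);
}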
950 /*
951 * If we were not able to meet our target, increase actcmp
952 */
953
954 if (vm_page_count_min()) {
955 if (vm_pageout_actcmp < ACT_MAX / 2)
956 vm_pageout_actcmp += ACT_ADVANCE;
957 } else {
958 if (vm_pageout_actcmp < ACT_DECLINE)
959 vm_pageout_actcmp = 0;
960 else
961 vm_pageout_actcmp -= ACT_DECLINE;
962 }
963
964 /*
965 * Compute the number of pages we want to try to move from the
966 * active queue to the inactive queue.
967 */
968
969 page_shortage = vm_paging_target() +
970 cnt.v_inactive_target - cnt.v_inactive_count;
971 page_shortage += addl_page_shortage;
972 page_shortage += vm_pageout_actcmp;
973
974 /*
975 * Scan the active queue for things we can deactivate. We nominally
976 * track the per-page activity counter and use it to locate
977 * deactivation candidates.
978 */
979
980 pcount = cnt.v_active_count;

--- 57 unchanged lines hidden ---

1038 */
1039 if (actcount && (m->object->ref_count != 0)) {
1040 s = splvm();
1041 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1042 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1043 splx(s);
1044 } else {
1045 m->act_count -= min(m->act_count, ACT_DECLINE);
1046 if (vm_pageout_algorithm_lru ||
1047 (m->object->ref_count == 0) ||
1048 (m->act_count <= vm_pageout_actcmp)) {
1036 if (vm_pageout_algorithm ||
1037 m->object->ref_count == 0 ||
1038 m->act_count == 0) {
1049 page_shortage--;
1050 if (m->object->ref_count == 0) {
1051 vm_page_protect(m, VM_PROT_NONE);
1052 if (m->dirty == 0)
1053 vm_page_cache(m);
1054 else
1055 vm_page_deactivate(m);
1056 } else {

--- 113 unchanged lines hidden ---

1170 if (bigproc != NULL) {
1171 killproc(bigproc, "out of swap space");
1172 bigproc->p_estcpu = 0;
1173 bigproc->p_nice = PRIO_MIN;
1174 resetpriority(bigproc);
1175 wakeup(&cnt.v_free_count);
1176 }
1177 }
1178 return force_wakeup;
1179}
1168}
1169
1170/*
1171 * This routine tries to maintain the pseudo LRU active queue,
1172 * so that during long periods of time when there is no paging,
1173 * some statistic accumulation still occurs. This code
1174 * helps the situation where paging just starts to occur.
1175 */

--- 62 unchanged lines hidden ---

1238 m->act_count = ACT_MAX;
1239 s = splvm();
1240 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1241 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1242 splx(s);
1243 } else {
1244 if (m->act_count == 0) {
1245 /*
1257 * We turn off page access, so that we have more accurate
1258 * RSS stats. We don't do this in the normal page deactivation
1259 * when the system is loaded VM wise, because the cost of
1260 * the large number of page protect operations would be higher
1261 * than the value of doing the operation.
1246 * We turn off page access, so that we have
1247 * more accurate RSS stats. We don't do this
1248 * in the normal page deactivation when the
1249 * system is loaded VM wise, because the
1250 * cost of the large number of page protect
1251 * operations would be higher than the value
1252 * of doing the operation.
1262 */
1263 vm_page_protect(m, VM_PROT_NONE);
1264 vm_page_deactivate(m);
1265 } else {
1266 m->act_count -= min(m->act_count, ACT_DECLINE);
1267 s = splvm();
1268 TAILQ_REMOVE(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
1269 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);

--- 32 unchanged lines hidden ---

1302
1303
1304/*
1305 * vm_pageout is the high level pageout daemon.
1306 */
1307static void
1308vm_pageout()
1309{
1301 int pass;
1310
1311 mtx_enter(&Giant, MTX_DEF);
1312
1313 /*
1314 * Initialize some paging parameters.
1315 */
1316
1317 cnt.v_interrupt_free_min = 2;
1318 if (cnt.v_page_count < 2000)
1319 vm_pageout_page_count = 8;
1320
1321 vm_pageout_free_page_calc(cnt.v_page_count);
1322 /*
1323 * free_reserved needs to include enough for the largest swap pager
1324 * structures plus enough for any pv_entry structs when paging.
1315 * v_free_target and v_cache_min control pageout hysteresis. Note
1316 * that these are more a measure of the VM cache queue hysteresis
1317 * than the VM free queue. Specifically, v_free_target is the
1318 * high water mark (free+cache pages).
1319 *
1320 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1321 * low water mark, while v_free_min is the stop. v_cache_min must
1322 * be big enough to handle memory needs while the pageout daemon
1323 * is signalled and run to free more pages.
1325 */
1326 if (cnt.v_free_count > 6144)
1327 cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
1326 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1328 else
1329 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1330
1331 if (cnt.v_free_count > 2048) {
1332 cnt.v_cache_min = cnt.v_free_target;
1333 cnt.v_cache_max = 2 * cnt.v_cache_min;
1334 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1335 } else {

--- 21 unchanged lines hidden ---
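As a worked example of the arithmetic above: if vm_pageout_free_page_calc() had (hypothetically) produced v_free_min = 1024 and v_free_reserved = 128 on a machine with more than 6144 free pages, the new code yields v_free_target = 4 * 1024 + 128 = 4224 (the old multiplier of 3 gave 3200), and the large-memory branch then sets v_cache_min = 4224, v_cache_max = 8448, and v_inactive_target = 6336. A sketch under those stated assumptions:

/*
 * Worked example of the watermark arithmetic above; the two inputs are
 * assumed for illustration, not what vm_pageout_free_page_calc() computes.
 */
static void
example_watermarks(void)
{
	int v_free_min = 1024;		/* assumed for illustration */
	int v_free_reserved = 128;	/* assumed for illustration */
	int v_free_target, v_cache_min, v_cache_max, v_inactive_target;

	v_free_target = 4 * v_free_min + v_free_reserved;	/* 4224 */
	v_cache_min = v_free_target;				/* 4224 */
	v_cache_max = 2 * v_cache_min;				/* 8448 */
	v_inactive_target = (3 * v_free_target) / 2;		/* 6336 */
	printf("target %d cache %d/%d inact %d\n",
	    v_free_target, v_cache_min, v_cache_max, v_inactive_target);
}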

1357
1358
1359 /*
1360 * Set maximum free per pass
1361 */
1362 if (vm_pageout_stats_free_max == 0)
1363 vm_pageout_stats_free_max = 5;
1364
1365 max_page_launder = (cnt.v_page_count > 1800 ? 32 : 16);
1366
1367 curproc->p_flag |= P_BUFEXHAUST;
1368 swap_pager_swap_init();
1366 pass = 0;
1369 /*
1370 * The pageout daemon is never done, so loop forever.
1371 */
1372 while (TRUE) {
1373 int error;
1374 int s = splvm();
1375
1376 /*

--- 4 unchanged lines hidden ---

1381 */
1382 if (vm_pages_needed && !vm_page_count_min()) {
1383 if (vm_paging_needed() <= 0)
1384 vm_pages_needed = 0;
1385 wakeup(&cnt.v_free_count);
1386 }
1387 if (vm_pages_needed) {
1388 /*
1389 * Still not done, sleep a bit and go again
1387 * Still not done, take a second pass without waiting
1388 * (unlimited dirty cleaning), otherwise sleep a bit
1389 * and try again.
1390 */
1391 tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
1391 ++pass;
1392 if (pass > 1)
1393 tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
1392 } else {
1393 /*
1394 * Good enough, sleep & handle stats
1396 * Good enough, sleep & handle stats. Prime the pass
1397 * for the next run.
1395 */
1399 if (pass > 1)
1400 pass = 1;
1401 else
1402 pass = 0;
1396 error = tsleep(&vm_pages_needed,
1397 PVM, "psleep", vm_pageout_stats_interval * hz);
1398 if (error && !vm_pages_needed) {
1399 if (vm_pageout_actcmp > 0)
1400 --vm_pageout_actcmp;
1401 splx(s);
1407 pass = 0;
1402 vm_pageout_page_stats();
1403 continue;
1404 }
1405 }
1406
1407 if (vm_pages_needed)
1408 cnt.v_pdwakeups++;
1409 splx(s);
1410 vm_pageout_scan();
1416 vm_pageout_scan(pass);
1411 vm_pageout_deficit = 0;
1412 }
1413}
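Seen from a distance, the loop above runs a small state machine on pass: a wakeup that still leaves a shortage escalates to an all-out pass (and only sleeps from the second retry on), while a scan that meets its target re-primes pass so the next shortage starts throttled again. A hypothetical distillation, not a function in this file:

/*
 * Hypothetical distillation of the pass bookkeeping in vm_pageout() above.
 */
static int
next_pass(int pass, int still_short)
{
	if (still_short)
		return (pass + 1);	/* escalate: pass >= 1 launders all out */
	return (pass > 1 ? 1 : 0);	/* target met: prime for the next run */
}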
1414
1415void
1416pagedaemon_wakeup()
1417{
1418 if (!vm_pages_needed && curproc != pageproc) {

--- 81 unchanged lines hidden ---