vm_zeroidle.c revision 100193
/*-
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2001 Matt Dillon
 *
 * All rights reserved.  Terms for use and redistribution
 * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/vm/vm_zeroidle.c 100193 2002-07-16 19:39:40Z alc $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

SYSCTL_DECL(_vm_stats_misc);

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

static int idlezero_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0, "");
TUNABLE_INT("vm.idlezero_enable", &idlezero_enable);

static int idlezero_maxrun = 16;
SYSCTL_INT(_vm, OID_AUTO, idlezero_maxrun, CTLFLAG_RW, &idlezero_maxrun, 0, "");
TUNABLE_INT("vm.idlezero_maxrun", &idlezero_maxrun);
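
/*
 * Both knobs may be set at boot via loader tunables (e.g.
 * vm.idlezero_enable="0" in /boot/loader.conf) or adjusted at run
 * time, for example:
 *
 *	sysctl vm.idlezero_enable=0
 *	sysctl vm.idlezero_maxrun=32
 *
 * The cnt_prezero counter is exported read-only as
 * vm.stats.misc.cnt_prezero.
 */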

/*
 * Implement the pre-zeroed page mechanism: a low-priority kernel
 * thread zeroes free pages in the background and tags them PG_ZERO,
 * so that requests for zeroed memory can often be satisfied without
 * a synchronous bzero at allocation time.
 */

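/*
 * Watermarks on the pre-zeroed page count, expressed as fractions of
 * the total free page count.
 */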
#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)

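/*
 * Set when the pre-zeroed page count last reached ZIDLE_HI and
 * cleared each time a page is zeroed; this provides the hysteresis
 * described in vm_page_zero_check().  The variable's address also
 * serves as the sleep/wakeup channel for the pagezero thread.
 */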
static int zero_state;

static int
vm_page_zero_check(void)
{

	if (!idlezero_enable)
		return 0;
	/*
	 * Attempt to keep a large fraction of our free pages (between
	 * ZIDLE_LO and ZIDLE_HI of the free count) in a PG_ZERO'd
	 * state.  Add some hysteresis so that we generally avoid
	 * zeroing a page when the system is near steady-state;
	 * otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages, because doing so may flush our L1 and L2 caches too
	 * much.
	 */
	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return 0;
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return 0;
	return 1;
}

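/*
 * Zero one free page, chosen round-robin from the PQ_FREE queues, and
 * requeue it with PG_ZERO set.  The page is removed from the free
 * queues while it is being zeroed, so it cannot be allocated out from
 * under us, and Giant and the free-queue lock are dropped around the
 * actual zeroing, which may take a while.
 */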
static int
vm_page_zero_idle(void)
{
	static int free_rover;
	vm_page_t m;

	mtx_lock(&Giant);
	mtx_lock_spin(&vm_page_queue_free_mtx);
	zero_state = 0;
	m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
	if (m != NULL && (m->flags & PG_ZERO) == 0) {
		vm_pageq_remove_nowakeup(m);
		mtx_unlock_spin(&vm_page_queue_free_mtx);
		mtx_unlock(&Giant);
		pmap_zero_page_idle(m);
		mtx_lock(&Giant);
		mtx_lock_spin(&vm_page_queue_free_mtx);
		vm_page_flag_set(m, PG_ZERO);
		vm_pageq_enqueue(PQ_FREE + m->pc, m);
		++vm_page_zero_count;
		++cnt_prezero;
		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
			zero_state = 1;
	}
	/* Advance the rover so that zeroing cycles through the page colors. */
	free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
	mtx_unlock_spin(&vm_page_queue_free_mtx);
	mtx_unlock(&Giant);
	return 1;
}

/* Called by vm_page_free() to hint that a new page is available. */
void
vm_page_zero_idle_wakeup(void)
{

	if (idlezero_enable && vm_page_zero_check())
		wakeup(&zero_state);
}

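/*
 * Main loop of the pagezero kernel thread: drop to the lowest idle
 * priority, then zero pages whenever vm_page_zero_check() says the
 * pre-zeroed pool is low.  Yield after idlezero_maxrun pages, or
 * sooner if another KSE becomes runnable, and sleep when there is no
 * work, to be awakened by vm_page_zero_idle_wakeup().
 */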
static void
vm_pagezero(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			if (pages > idlezero_maxrun || kserunnable()) {
				/* Voluntarily yield the processor. */
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			/* No work; sleep until woken or five minutes pass. */
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}

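/*
 * Register the pagezero kernel thread; kproc_start() creates it at
 * the SI_SUB_KTHREAD_VM stage of system initialization.
 */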
static struct proc *pagezero;
static struct kproc_desc pagezero_kp = {
	"pagezero",
	vm_pagezero,
	&pagezero
};
SYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &pagezero_kp)
