/*-
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2001 Matt Dillon
 *
 * All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * from: FreeBSD: .../i386/vm_machdep.c,v 1.165 2001/07/04 23:27:04 dillon
 */
3479265Sdillon
35116226Sobrien#include <sys/cdefs.h>
36116226Sobrien__FBSDID("$FreeBSD$");
37116226Sobrien
38134649Sscottl#include <opt_sched.h>
39134649Sscottl
4079265Sdillon#include <sys/param.h>
4179265Sdillon#include <sys/systm.h>
4282314Speter#include <sys/kernel.h>
4379265Sdillon#include <sys/proc.h>
4479265Sdillon#include <sys/vmmeter.h>
4582314Speter#include <sys/lock.h>
4679265Sdillon#include <sys/mutex.h>
47104964Sjeff#include <sys/sched.h>
4879265Sdillon#include <sys/sysctl.h>
4982314Speter#include <sys/kthread.h>
50125314Sjeff#include <sys/unistd.h>
5179265Sdillon
5279265Sdillon#include <vm/vm.h>
53254065Skib#include <vm/vm_param.h>
5479265Sdillon#include <vm/vm_page.h>
55170816Salc#include <vm/vm_phys.h>
5679265Sdillon
57170816Salcstatic int idlezero_enable_default = 0;
58134461SiedowseTUNABLE_INT("vm.idlezero_enable", &idlezero_enable_default);
59134461Siedowse/* Defer setting the enable flag until the kthread is running. */
60134461Siedowsestatic int idlezero_enable = 0;
61181239StrhodesSYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0,
62181239Strhodes    "Allow the kernel to use idle cpu cycles to zero-out pages");
6379265Sdillon/*
6479265Sdillon * Implement the pre-zeroed page mechanism.
6579265Sdillon */
6679265Sdillon
6779265Sdillon#define ZIDLE_LO(v)	((v) * 2 / 3)
6879265Sdillon#define ZIDLE_HI(v)	((v) * 4 / 5)
6979265Sdillon
70137104Salcstatic boolean_t wakeup_needed = FALSE;
7182314Speterstatic int zero_state;
7282314Speter
7382314Speterstatic int
7482314Spetervm_page_zero_check(void)
7579265Sdillon{
7679265Sdillon
7782314Speter	if (!idlezero_enable)
78126588Sbde		return (0);
7979265Sdillon	/*
8079265Sdillon	 * Attempt to maintain approximately 1/2 of our free pages in a
8179265Sdillon	 * PG_ZERO'd state.   Add some hysteresis to (attempt to) avoid
8279265Sdillon	 * generally zeroing a page when the system is near steady-state.
8379265Sdillon	 * Otherwise we might get 'flutter' during disk I/O / IPC or
8479265Sdillon	 * fast sleeps.  We also do not want to be continuously zeroing
8579265Sdillon	 * pages because doing so may flush our L1 and L2 caches too much.
8679265Sdillon	 */
87170170Sattilio	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
88126588Sbde		return (0);
89170170Sattilio	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
90126588Sbde		return (0);
91126588Sbde	return (1);
9282314Speter}
9379265Sdillon
94161489Salcstatic void
9582314Spetervm_page_zero_idle(void)
9682314Speter{
9782314Speter
98166637Salc	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
9982314Speter	zero_state = 0;
100170816Salc	if (vm_phys_zero_pages_idle()) {
101170170Sattilio		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
10282314Speter			zero_state = 1;
10382314Speter	}
10482314Speter}
10582314Speter
106126588Sbde/* Called by vm_page_free to hint that a new page is available. */
10782314Spetervoid
10882314Spetervm_page_zero_idle_wakeup(void)
10982314Speter{
11082314Speter
111166637Salc	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
112137104Salc	if (wakeup_needed && vm_page_zero_check()) {
113137104Salc		wakeup_needed = FALSE;
11482314Speter		wakeup(&zero_state);
115137104Salc	}
11682314Speter}
11782314Speter
11882314Speterstatic void
119125314Sjeffvm_pagezero(void __unused *arg)
12082314Speter{
12182314Speter
122134461Siedowse	idlezero_enable = idlezero_enable_default;
12382314Speter
124166637Salc	mtx_lock(&vm_page_queue_free_mtx);
12582314Speter	for (;;) {
12682314Speter		if (vm_page_zero_check()) {
127137268Sjhb			vm_page_zero_idle();
128131481Sjhb#ifndef PREEMPTION
129137268Sjhb			if (sched_runnable()) {
130170307Sjeff				thread_lock(curthread);
131178272Sjeff				mi_switch(SW_VOL | SWT_IDLE, NULL);
132170307Sjeff				thread_unlock(curthread);
13382314Speter			}
134131481Sjhb#endif
13582314Speter		} else {
136137104Salc			wakeup_needed = TRUE;
137166637Salc			msleep(&zero_state, &vm_page_queue_free_mtx, 0,
138166637Salc			    "pgzero", hz * 300);
13979265Sdillon		}
14079265Sdillon	}
14179265Sdillon}
14279265Sdillon
143125314Sjeffstatic void
144125314Sjeffpagezero_start(void __unused *arg)
145125314Sjeff{
146125314Sjeff	int error;
147198854Sattilio	struct proc *p;
148141247Sssouhlal	struct thread *td;
149125314Sjeff
150198854Sattilio	error = kproc_create(vm_pagezero, NULL, &p, RFSTOPPED, 0, "pagezero");
151125314Sjeff	if (error)
152125314Sjeff		panic("pagezero_start: error %d\n", error);
153198854Sattilio	td = FIRST_THREAD_IN_PROC(p);
154170307Sjeff	thread_lock(td);
155198854Sattilio
156198854Sattilio	/* We're an idle task, don't count us in the load. */
157198854Sattilio	td->td_flags |= TDF_NOLOAD;
158163709Sjb	sched_class(td, PRI_IDLE);
159141247Sssouhlal	sched_prio(td, PRI_MAX_IDLE);
160166188Sjeff	sched_add(td, SRQ_BORING);
161170307Sjeff	thread_unlock(td);
162125314Sjeff}
163177253SrwatsonSYSINIT(pagezero, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, pagezero_start, NULL);
164