vm_zeroidle.c revision 79265
/*-
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2001 Matt Dillon
 *
 * All rights reserved.  Terms for use and redistribution
 * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/vm/vm_zeroidle.c 79265 2001-07-05 01:32:42Z dillon $
 */

#include "opt_npx.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
#include "opt_reset.h"
#include "opt_isa.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/vm86.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#else
#include <i386/isa/isa.h>
#endif

SYSCTL_DECL(_vm_stats_misc);

/* Running count of pages pre-zeroed by the idle loop; exported read-only below. */
static int cnt_prezero;

SYSCTL_INT(_vm_stats_misc, OID_AUTO,
	cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

/*
 * Hysteresis watermarks expressed as fractions of the free page count:
 * stop zeroing once the zeroed count reaches ZIDLE_HI (4/5), and do not
 * resume until it falls below ZIDLE_LO (2/3).
 */
#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)

/*
 * vm_page_zero_idle:
 *
 *	Called from the idle loop to pre-zero at most one free page per
 *	invocation.  Returns 1 if a pass was made (Giant was acquired),
 *	0 if the zeroed-page watermarks say no work is needed or Giant
 *	could not be acquired without blocking.
 */
int
vm_page_zero_idle(void)
{
	static int free_rover;	/* persistent index into the PQ_FREE queues */
	static int zero_state;	/* hysteresis flag: 1 after hitting ZIDLE_HI */
	vm_page_t m;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.   Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	/* After reaching the high watermark, stay idle until we drop below LO. */
	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	/* Never zero beyond the high watermark regardless of state. */
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

	/* Idle context must not sleep for Giant, hence trylock. */
	if (mtx_trylock(&Giant)) {
		zero_state = 0;
		m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			/*
			 * Pull the page off its free queue while we zero it
			 * so it cannot be allocated out from under us, then
			 * reinsert it at the tail with PG_ZERO set.
			 */
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
			vm_page_flag_set(m, PG_ZERO);
			/* Requeue on the free queue matching the page's color. */
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		/*
		 * Advance the rover by PQ_PRIME2 (mod PQ_L2_MASK+1) so
		 * successive passes walk the queues in a scattered order.
		 */
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		mtx_unlock(&Giant);
		return (1);
	}
	return(0);
}