vm_page.c (92029) vm_page.c (92654)
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
37 * $FreeBSD: head/sys/vm/vm_page.c 92029 2002-03-10 21:52:48Z eivind $
37 * $FreeBSD: head/sys/vm/vm_page.c 92654 2002-03-19 09:11:49Z jeff $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * GENERAL RULES ON VM_PAGE MANIPULATION
69 *
70 * - a pageq mutex is required when adding or removing a page from a
71 * page queue (vm_page_queue[]), regardless of other mutexes or the
72 * busy state of a page.
73 *
74 * - a hash chain mutex is required when associating or disassociating
75 * a page from the VM PAGE CACHE hash table (vm_page_buckets),
76 * regardless of other mutexes or the busy state of a page.
77 *
78 * - either a hash chain mutex OR a busied page is required in order
79 * to modify the page flags. A hash chain mutex must be obtained in
80 * order to busy a page. A page's flags cannot be modified by a
81 * hash chain mutex if the page is marked busy.
82 *
83 * - The object memq mutex is held when inserting or removing
84 * pages from an object (vm_page_insert() or vm_page_remove()). This
85 * is different from the object's main mutex.
86 *
87 * Generally speaking, you have to be aware of side effects when running
88 * vm_page ops. A vm_page_lookup() will return with the hash chain
89 * locked, whether it was able to lookup the page or not. vm_page_free(),
90 * vm_page_cache(), vm_page_activate(), and a number of other routines
91 * will release the hash chain mutex for you. Intermediate manipulation
92 * routines such as vm_page_flag_set() expect the hash chain to be held
93 * on entry and the hash chain will remain held on return.
94 *
95 * pageq scanning can only occur with the pageq in question locked.
96 * We have a known bottleneck with the active queue, but the cache
97 * and free queues are actually arrays already.
98 */
99
100/*
101 * Resident memory management module.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/lock.h>
107#include <sys/malloc.h>
108#include <sys/mutex.h>
109#include <sys/proc.h>
110#include <sys/vmmeter.h>
111#include <sys/vnode.h>
112
113#include <vm/vm.h>
114#include <vm/vm_param.h>
115#include <vm/vm_kern.h>
116#include <vm/vm_object.h>
117#include <vm/vm_page.h>
118#include <vm/vm_pageout.h>
119#include <vm/vm_pager.h>
120#include <vm/vm_extern.h>
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
59 * School of Computer Science
60 * Carnegie Mellon University
61 * Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 * GENERAL RULES ON VM_PAGE MANIPULATION
69 *
70 * - a pageq mutex is required when adding or removing a page from a
71 * page queue (vm_page_queue[]), regardless of other mutexes or the
72 * busy state of a page.
73 *
74 * - a hash chain mutex is required when associating or disassociating
75 * a page from the VM PAGE CACHE hash table (vm_page_buckets),
76 * regardless of other mutexes or the busy state of a page.
77 *
78 * - either a hash chain mutex OR a busied page is required in order
79 * to modify the page flags. A hash chain mutex must be obtained in
80 * order to busy a page. A page's flags cannot be modified by a
81 * hash chain mutex if the page is marked busy.
82 *
83 * - The object memq mutex is held when inserting or removing
84 * pages from an object (vm_page_insert() or vm_page_remove()). This
85 * is different from the object's main mutex.
86 *
87 * Generally speaking, you have to be aware of side effects when running
88 * vm_page ops. A vm_page_lookup() will return with the hash chain
89 * locked, whether it was able to lookup the page or not. vm_page_free(),
90 * vm_page_cache(), vm_page_activate(), and a number of other routines
91 * will release the hash chain mutex for you. Intermediate manipulation
92 * routines such as vm_page_flag_set() expect the hash chain to be held
93 * on entry and the hash chain will remain held on return.
94 *
95 * pageq scanning can only occur with the pageq in question locked.
96 * We have a known bottleneck with the active queue, but the cache
97 * and free queues are actually arrays already.
98 */
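/*
 * A minimal sketch of the busy protocol described above, using only
 * routines defined in this file; the caller context (Giant held, any
 * required spl) and the wait-channel string are assumptions:
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL &&
 *	    vm_page_sleep_busy(m, TRUE, "pgwait"))
 *		;
 *	if (m != NULL) {
 *		vm_page_busy(m);	(sets PG_BUSY, excluding other users)
 *		...			(page flags may be modified here)
 *		vm_page_wakeup(m);	(clears PG_BUSY and wakes waiters)
 *	}
 */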
99
100/*
101 * Resident memory management module.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/lock.h>
107#include <sys/malloc.h>
108#include <sys/mutex.h>
109#include <sys/proc.h>
110#include <sys/vmmeter.h>
111#include <sys/vnode.h>
112
113#include <vm/vm.h>
114#include <vm/vm_param.h>
115#include <vm/vm_kern.h>
116#include <vm/vm_object.h>
117#include <vm/vm_page.h>
118#include <vm/vm_pageout.h>
119#include <vm/vm_pager.h>
120#include <vm/vm_extern.h>
121#include <vm/uma.h>
122#include <vm/uma_int.h>
121
122/*
 123 * Associated with each page of user-allocatable memory is a
 124 * page structure.
125 */
126static struct vm_page **vm_page_buckets; /* Array of buckets */
127static int vm_page_bucket_count; /* How big is array? */
128static int vm_page_hash_mask; /* Mask for hash function */
129static volatile int vm_page_bucket_generation;
130static struct mtx vm_buckets_mtx[BUCKET_HASH_SIZE];
131
132vm_page_t vm_page_array = 0;
133int vm_page_array_size = 0;
134long first_page = 0;
135int vm_page_zero_count = 0;
136
137/*
138 * vm_set_page_size:
139 *
140 * Sets the page size, perhaps based upon the memory
141 * size. Must be called before any use of page-size
142 * dependent functions.
143 */
144void
145vm_set_page_size(void)
146{
147 if (cnt.v_page_size == 0)
148 cnt.v_page_size = PAGE_SIZE;
149 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
150 panic("vm_set_page_size: page size not a power of two");
151}
152
153/*
154 * vm_page_startup:
155 *
156 * Initializes the resident memory module.
157 *
158 * Allocates memory for the page cells, and
159 * for the object/offset-to-page hash table headers.
160 * Each page cell is initialized and placed on the free list.
161 */
162vm_offset_t
163vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
164{
165 vm_offset_t mapped;
166 struct vm_page **bucket;
167 vm_size_t npages, page_range;
168 vm_offset_t new_end;
169 int i;
170 vm_offset_t pa;
171 int nblocks;
172 vm_offset_t last_pa;
173
174 /* the biggest memory array is the second group of pages */
175 vm_offset_t end;
176 vm_offset_t biggestone, biggestsize;
177
178 vm_offset_t total;
123
124/*
 125 * Associated with each page of user-allocatable memory is a
 126 * page structure.
127 */
128static struct vm_page **vm_page_buckets; /* Array of buckets */
129static int vm_page_bucket_count; /* How big is array? */
130static int vm_page_hash_mask; /* Mask for hash function */
131static volatile int vm_page_bucket_generation;
132static struct mtx vm_buckets_mtx[BUCKET_HASH_SIZE];
133
134vm_page_t vm_page_array = 0;
135int vm_page_array_size = 0;
136long first_page = 0;
137int vm_page_zero_count = 0;
138
139/*
140 * vm_set_page_size:
141 *
142 * Sets the page size, perhaps based upon the memory
143 * size. Must be called before any use of page-size
144 * dependent functions.
145 */
146void
147vm_set_page_size(void)
148{
149 if (cnt.v_page_size == 0)
150 cnt.v_page_size = PAGE_SIZE;
151 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
152 panic("vm_set_page_size: page size not a power of two");
153}
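/*
 * The check above relies on a power of two having exactly one bit set,
 * so x & (x - 1) is zero only for powers of two: e.g. 4096 & 4095 == 0,
 * while 4097 & 4096 == 0x1000 != 0 and would panic.
 */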
154
155/*
156 * vm_page_startup:
157 *
158 * Initializes the resident memory module.
159 *
160 * Allocates memory for the page cells, and
161 * for the object/offset-to-page hash table headers.
162 * Each page cell is initialized and placed on the free list.
163 */
164vm_offset_t
165vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
166{
167 vm_offset_t mapped;
168 struct vm_page **bucket;
169 vm_size_t npages, page_range;
170 vm_offset_t new_end;
171 int i;
172 vm_offset_t pa;
173 int nblocks;
174 vm_offset_t last_pa;
175
176 /* the biggest memory array is the second group of pages */
177 vm_offset_t end;
178 vm_offset_t biggestone, biggestsize;
179
180 vm_offset_t total;
181 vm_size_t bootpages;
179
180 total = 0;
181 biggestsize = 0;
182 biggestone = 0;
183 nblocks = 0;
184 vaddr = round_page(vaddr);
185
186 for (i = 0; phys_avail[i + 1]; i += 2) {
187 phys_avail[i] = round_page(phys_avail[i]);
188 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
189 }
190
191 for (i = 0; phys_avail[i + 1]; i += 2) {
192 int size = phys_avail[i + 1] - phys_avail[i];
193
194 if (size > biggestsize) {
195 biggestone = i;
196 biggestsize = size;
197 }
198 ++nblocks;
199 total += size;
200 }
201
202 end = phys_avail[biggestone+1];
203
204 /*
205 * Initialize the queue headers for the free queue, the active queue
206 * and the inactive queue.
207 */
208 vm_pageq_init();
209
210 /*
182
183 total = 0;
184 biggestsize = 0;
185 biggestone = 0;
186 nblocks = 0;
187 vaddr = round_page(vaddr);
188
189 for (i = 0; phys_avail[i + 1]; i += 2) {
190 phys_avail[i] = round_page(phys_avail[i]);
191 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
192 }
193
194 for (i = 0; phys_avail[i + 1]; i += 2) {
195 int size = phys_avail[i + 1] - phys_avail[i];
196
197 if (size > biggestsize) {
198 biggestone = i;
199 biggestsize = size;
200 }
201 ++nblocks;
202 total += size;
203 }
204
205 end = phys_avail[biggestone+1];
206
207 /*
208 * Initialize the queue headers for the free queue, the active queue
209 * and the inactive queue.
210 */
211 vm_pageq_init();
212
213 /*
 214	 * Allocate memory for use when bootstrapping the kernel memory allocator.
215 */
216 bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
217 new_end = end - bootpages;
218 new_end = trunc_page(new_end);
219 mapped = pmap_map(&vaddr, new_end, end,
220 VM_PROT_READ | VM_PROT_WRITE);
221 bzero((caddr_t) mapped, end - new_end);
222 uma_startup((caddr_t)mapped);
223
224 end = new_end;
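	/*
	 * The carve-out above reserves UMA_BOOT_PAGES * UMA_SLAB_SIZE bytes
	 * from the top of the largest physical segment, maps and zeroes
	 * them, and hands them to uma_startup() so the zone allocator can
	 * operate before vm_page_array itself has been set up.
	 */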
225
226 /*
211 * Allocate (and initialize) the hash table buckets.
212 *
213 * The number of buckets MUST BE a power of 2, and the actual value is
214 * the next power of 2 greater than the number of physical pages in
215 * the system.
216 *
217 * We make the hash table approximately 2x the number of pages to
 218 * reduce the chain length.  Since the buckets are singly linked, this
 219 * is about the same size as the 1x TAILQ-based hash table we used
 220 * before, but the chain length will be smaller.
221 *
222 * Note: This computation can be tweaked if desired.
223 */
224 if (vm_page_bucket_count == 0) {
225 vm_page_bucket_count = 1;
226 while (vm_page_bucket_count < atop(total))
227 vm_page_bucket_count <<= 1;
228 }
229 vm_page_bucket_count <<= 1;
230 vm_page_hash_mask = vm_page_bucket_count - 1;
231
232 /*
233 * Validate these addresses.
234 */
235 new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
236 new_end = trunc_page(new_end);
237 mapped = pmap_map(&vaddr, new_end, end,
238 VM_PROT_READ | VM_PROT_WRITE);
239 bzero((caddr_t) mapped, end - new_end);
240
241 vm_page_buckets = (struct vm_page **)mapped;
242 bucket = vm_page_buckets;
243 for (i = 0; i < vm_page_bucket_count; i++) {
244 *bucket = NULL;
245 bucket++;
246 }
247 for (i = 0; i < BUCKET_HASH_SIZE; ++i)
248 mtx_init(&vm_buckets_mtx[i], "vm buckets hash mutexes", MTX_DEF);
249
250 /*
251 * Compute the number of pages of memory that will be available for
252 * use (taking into account the overhead of a page structure per
253 * page).
254 */
255 first_page = phys_avail[0] / PAGE_SIZE;
256 page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
257 npages = (total - (page_range * sizeof(struct vm_page)) -
258 (end - new_end)) / PAGE_SIZE;
259 end = new_end;
260
261 /*
262 * Initialize the mem entry structures now, and put them in the free
263 * queue.
264 */
265 new_end = trunc_page(end - page_range * sizeof(struct vm_page));
266 mapped = pmap_map(&vaddr, new_end, end,
267 VM_PROT_READ | VM_PROT_WRITE);
268 vm_page_array = (vm_page_t) mapped;
269
270 /*
271 * Clear all of the page structures
272 */
273 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
274 vm_page_array_size = page_range;
275
276 /*
277 * Construct the free queue(s) in descending order (by physical
278 * address) so that the first 16MB of physical memory is allocated
279 * last rather than first. On large-memory machines, this avoids
280 * the exhaustion of low physical memory before isa_dmainit has run.
281 */
282 cnt.v_page_count = 0;
283 cnt.v_free_count = 0;
284 for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
285 pa = phys_avail[i];
286 if (i == biggestone)
287 last_pa = new_end;
288 else
289 last_pa = phys_avail[i + 1];
290 while (pa < last_pa && npages-- > 0) {
291 vm_pageq_add_new_page(pa);
292 pa += PAGE_SIZE;
293 }
294 }
295 return (vaddr);
296}
297
298/*
299 * vm_page_hash:
300 *
301 * Distributes the object/offset key pair among hash buckets.
302 *
303 * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
304 * This routine may not block.
305 *
306 * We try to randomize the hash based on the object to spread the pages
307 * out in the hash table without it costing us too much.
308 */
309static __inline int
310vm_page_hash(vm_object_t object, vm_pindex_t pindex)
311{
312 int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
313
314 return (i & vm_page_hash_mask);
315}
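/*
 * Worked example with illustrative values: suppose vm_page_hash_mask is
 * 0xff, the object pointer contributes 0x1200, and object->hash_rand is
 * 0x40.  For pindex 5, (0x1200 + 5) ^ 0x40 = 0x1245, masked to bucket
 * 0x45; pindex 6 lands in bucket 0x46.  Consecutive pindexes of one
 * object thus hash to consecutive buckets, while hash_rand scatters
 * different objects across the table.
 */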
316
317void
318vm_page_flag_set(vm_page_t m, unsigned short bits)
319{
320 GIANT_REQUIRED;
321 m->flags |= bits;
322}
323
324void
325vm_page_flag_clear(vm_page_t m, unsigned short bits)
326{
327 GIANT_REQUIRED;
328 m->flags &= ~bits;
329}
330
331void
332vm_page_busy(vm_page_t m)
333{
334 KASSERT((m->flags & PG_BUSY) == 0,
335 ("vm_page_busy: page already busy!!!"));
336 vm_page_flag_set(m, PG_BUSY);
337}
338
339/*
340 * vm_page_flash:
341 *
342 * wakeup anyone waiting for the page.
343 */
344void
345vm_page_flash(vm_page_t m)
346{
347 if (m->flags & PG_WANTED) {
348 vm_page_flag_clear(m, PG_WANTED);
349 wakeup(m);
350 }
351}
352
353/*
354 * vm_page_wakeup:
355 *
356 * clear the PG_BUSY flag and wakeup anyone waiting for the
357 * page.
358 *
359 */
360void
361vm_page_wakeup(vm_page_t m)
362{
363 KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
364 vm_page_flag_clear(m, PG_BUSY);
365 vm_page_flash(m);
366}
367
368/*
369 *
370 *
371 */
372void
373vm_page_io_start(vm_page_t m)
374{
375 GIANT_REQUIRED;
376 m->busy++;
377}
378
379void
380vm_page_io_finish(vm_page_t m)
381{
382 GIANT_REQUIRED;
383 m->busy--;
384 if (m->busy == 0)
385 vm_page_flash(m);
386}
387
388/*
389 * Keep page from being freed by the page daemon
390 * much of the same effect as wiring, except much lower
391 * overhead and should be used only for *very* temporary
392 * holding ("wiring").
393 */
394void
395vm_page_hold(vm_page_t mem)
396{
397 GIANT_REQUIRED;
398 mem->hold_count++;
399}
400
401void
402vm_page_unhold(vm_page_t mem)
403{
404 GIANT_REQUIRED;
405 --mem->hold_count;
406 KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
407 if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
408 vm_page_free_toq(mem);
409}
410
411/*
412 * vm_page_protect:
413 *
414 * Reduce the protection of a page. This routine never raises the
415 * protection and therefore can be safely called if the page is already
 416 * at VM_PROT_NONE (it is effectively a NOP).
417 */
418void
419vm_page_protect(vm_page_t mem, int prot)
420{
421 if (prot == VM_PROT_NONE) {
422 if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
423 pmap_page_protect(mem, VM_PROT_NONE);
424 vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
425 }
426 } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
427 pmap_page_protect(mem, VM_PROT_READ);
428 vm_page_flag_clear(mem, PG_WRITEABLE);
429 }
430}
431/*
432 * vm_page_zero_fill:
433 *
434 * Zero-fill the specified page.
435 * Written as a standard pagein routine, to
436 * be used by the zero-fill object.
437 */
438boolean_t
439vm_page_zero_fill(vm_page_t m)
440{
441 pmap_zero_page(VM_PAGE_TO_PHYS(m));
442 return (TRUE);
443}
444
445/*
446 * vm_page_copy:
447 *
448 * Copy one page to another
449 */
450void
451vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
452{
453 pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
454 dest_m->valid = VM_PAGE_BITS_ALL;
455}
456
457/*
458 * vm_page_free:
459 *
460 * Free a page
461 *
462 * The clearing of PG_ZERO is a temporary safety until the code can be
463 * reviewed to determine that PG_ZERO is being properly cleared on
464 * write faults or maps. PG_ZERO was previously cleared in
465 * vm_page_alloc().
466 */
467void
468vm_page_free(vm_page_t m)
469{
470 vm_page_flag_clear(m, PG_ZERO);
471 vm_page_free_toq(m);
472 vm_page_zero_idle_wakeup();
473}
474
475/*
476 * vm_page_free_zero:
477 *
478 * Free a page to the zerod-pages queue
479 */
480void
481vm_page_free_zero(vm_page_t m)
482{
483 vm_page_flag_set(m, PG_ZERO);
484 vm_page_free_toq(m);
485}
486
487/*
488 * vm_page_sleep_busy:
489 *
490 * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
491 * m->busy is zero. Returns TRUE if it had to sleep ( including if
492 * it almost had to sleep and made temporary spl*() mods), FALSE
493 * otherwise.
494 *
495 * This routine assumes that interrupts can only remove the busy
496 * status from a page, not set the busy status or change it from
 497 * PG_BUSY to m->busy or vice versa (which would create a timing
498 * window).
499 */
500int
501vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
502{
503 GIANT_REQUIRED;
504 if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
505 int s = splvm();
506 if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
507 /*
508 * Page is busy. Wait and retry.
509 */
510 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
511 tsleep(m, PVM, msg, 0);
512 }
513 splx(s);
514 return (TRUE);
515 /* not reached */
516 }
517 return (FALSE);
518}
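/*
 * A typical caller pattern (sketch; the wait-channel string and the
 * decision to also wait on m->busy are illustrative):
 *
 *	while (vm_page_sleep_busy(m, TRUE, "vmpbwt"))
 *		;
 *	vm_page_busy(m);
 */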
519/*
520 * vm_page_dirty:
521 *
522 * make page all dirty
523 */
524void
525vm_page_dirty(vm_page_t m)
526{
527 KASSERT(m->queue - m->pc != PQ_CACHE,
528 ("vm_page_dirty: page in cache!"));
529 m->dirty = VM_PAGE_BITS_ALL;
530}
531
532/*
533 * vm_page_undirty:
534 *
535 * Set page to not be dirty. Note: does not clear pmap modify bits
536 */
537void
538vm_page_undirty(vm_page_t m)
539{
540 m->dirty = 0;
541}
542
543/*
544 * vm_page_insert: [ internal use only ]
545 *
546 * Inserts the given mem entry into the object and object list.
547 *
548 * The pagetables are not updated but will presumably fault the page
549 * in if necessary, or if a kernel page the caller will at some point
550 * enter the page into the kernel's pmap. We are not allowed to block
551 * here so we *can't* do this anyway.
552 *
553 * The object and page must be locked, and must be splhigh.
554 * This routine may not block.
555 */
556void
557vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
558{
559 struct vm_page **bucket;
560
561 GIANT_REQUIRED;
562
563 if (m->object != NULL)
564 panic("vm_page_insert: already inserted");
565
566 /*
567 * Record the object/offset pair in this page
568 */
569 m->object = object;
570 m->pindex = pindex;
571
572 /*
573 * Insert it into the object_object/offset hash table
574 */
575 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
576 m->hnext = *bucket;
577 *bucket = m;
578 vm_page_bucket_generation++;
579
580 /*
581 * Now link into the object's list of backed pages.
582 */
583 TAILQ_INSERT_TAIL(&object->memq, m, listq);
584 object->generation++;
585
586 /*
587 * show that the object has one more resident page.
588 */
589 object->resident_page_count++;
590
591 /*
592 * Since we are inserting a new and possibly dirty page,
593 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
594 */
595 if (m->flags & PG_WRITEABLE)
596 vm_object_set_writeable_dirty(object);
597}
598
599/*
600 * vm_page_remove:
601 * NOTE: used by device pager as well -wfj
602 *
603 * Removes the given mem entry from the object/offset-page
604 * table and the object page list, but do not invalidate/terminate
605 * the backing store.
606 *
607 * The object and page must be locked, and at splhigh.
608 * The underlying pmap entry (if any) is NOT removed here.
609 * This routine may not block.
610 */
611void
612vm_page_remove(vm_page_t m)
613{
614 vm_object_t object;
615
616 GIANT_REQUIRED;
617
618 if (m->object == NULL)
619 return;
620
621 if ((m->flags & PG_BUSY) == 0) {
622 panic("vm_page_remove: page not busy");
623 }
624
625 /*
626 * Basically destroy the page.
627 */
628 vm_page_wakeup(m);
629
630 object = m->object;
631
632 /*
633 * Remove from the object_object/offset hash table. The object
634 * must be on the hash queue, we will panic if it isn't
635 *
636 * Note: we must NULL-out m->hnext to prevent loops in detached
637 * buffers with vm_page_lookup().
638 */
639 {
640 struct vm_page **bucket;
641
642 bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
643 while (*bucket != m) {
644 if (*bucket == NULL)
645 panic("vm_page_remove(): page not found in hash");
646 bucket = &(*bucket)->hnext;
647 }
648 *bucket = m->hnext;
649 m->hnext = NULL;
650 vm_page_bucket_generation++;
651 }
652
653 /*
654 * Now remove from the object's list of backed pages.
655 */
656 TAILQ_REMOVE(&object->memq, m, listq);
657
658 /*
659 * And show that the object has one fewer resident page.
660 */
661 object->resident_page_count--;
662 object->generation++;
663
664 m->object = NULL;
665}
666
667/*
668 * vm_page_lookup:
669 *
670 * Returns the page associated with the object/offset
671 * pair specified; if none is found, NULL is returned.
672 *
673 * NOTE: the code below does not lock. It will operate properly if
674 * an interrupt makes a change, but the generation algorithm will not
675 * operate properly in an SMP environment where both cpu's are able to run
676 * kernel code simultaneously.
677 *
678 * The object must be locked. No side effects.
679 * This routine may not block.
680 * This is a critical path routine
681 */
682vm_page_t
683vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
684{
685 vm_page_t m;
686 struct vm_page **bucket;
687 int generation;
688
689 /*
690 * Search the hash table for this object/offset pair
691 */
692retry:
693 generation = vm_page_bucket_generation;
694 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
695 for (m = *bucket; m != NULL; m = m->hnext) {
696 if ((m->object == object) && (m->pindex == pindex)) {
697 if (vm_page_bucket_generation != generation)
698 goto retry;
699 return (m);
700 }
701 }
702 if (vm_page_bucket_generation != generation)
703 goto retry;
704 return (NULL);
705}
706
707/*
708 * vm_page_rename:
709 *
710 * Move the given memory entry from its
711 * current object to the specified target object/offset.
712 *
713 * The object must be locked.
714 * This routine may not block.
715 *
716 * Note: this routine will raise itself to splvm(), the caller need not.
717 *
718 * Note: swap associated with the page must be invalidated by the move. We
719 * have to do this for several reasons: (1) we aren't freeing the
720 * page, (2) we are dirtying the page, (3) the VM system is probably
721 * moving the page from object A to B, and will then later move
722 * the backing store from A to B and we can't have a conflict.
723 *
724 * Note: we *always* dirty the page. It is necessary both for the
725 * fact that we moved it, and because we may be invalidating
726 * swap. If the page is on the cache, we have to deactivate it
727 * or vm_page_dirty() will panic. Dirty pages are not allowed
728 * on the cache.
729 */
730void
731vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
732{
733 int s;
734
735 s = splvm();
736 vm_page_remove(m);
737 vm_page_insert(m, new_object, new_pindex);
738 if (m->queue - m->pc == PQ_CACHE)
739 vm_page_deactivate(m);
740 vm_page_dirty(m);
741 splx(s);
742}
743
744/*
745 * vm_page_select_cache:
746 *
 747 * Find a page on the cache queue with color optimization.  Pages that
 748 * are found but not usable (busy, unmanaged, held, or wired) are
 749 * deactivated; this keeps us from using potentially busy cached pages.
750 *
751 * This routine must be called at splvm().
752 * This routine may not block.
753 */
754static vm_page_t
755vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
756{
757 vm_page_t m;
758
759 GIANT_REQUIRED;
760 while (TRUE) {
761 m = vm_pageq_find(
762 PQ_CACHE,
763 (pindex + object->pg_color) & PQ_L2_MASK,
764 FALSE
765 );
766 if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
767 m->hold_count || m->wire_count)) {
768 vm_page_deactivate(m);
769 continue;
770 }
771 return m;
772 }
773}
774
775/*
776 * vm_page_select_free:
777 *
778 * Find a free or zero page, with specified preference.
779 *
780 * This routine must be called at splvm().
781 * This routine may not block.
782 */
783static __inline vm_page_t
784vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
785{
786 vm_page_t m;
787
788 m = vm_pageq_find(
789 PQ_FREE,
790 (pindex + object->pg_color) & PQ_L2_MASK,
791 prefer_zero
792 );
793 return (m);
794}
795
796/*
797 * vm_page_alloc:
798 *
799 * Allocate and return a memory cell associated
800 * with this VM object/offset pair.
801 *
802 * page_req classes:
803 * VM_ALLOC_NORMAL normal process request
804 * VM_ALLOC_SYSTEM system *really* needs a page
805 * VM_ALLOC_INTERRUPT interrupt time request
806 * VM_ALLOC_ZERO zero page
807 *
808 * This routine may not block.
809 *
810 * Additional special handling is required when called from an
811 * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
812 * the page cache in this case.
813 */
814vm_page_t
815vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
816{
817 vm_page_t m = NULL;
818 int s;
819
820 GIANT_REQUIRED;
821
822 KASSERT(!vm_page_lookup(object, pindex),
823 ("vm_page_alloc: page already allocated"));
824
825 /*
826 * The pager is allowed to eat deeper into the free page list.
827 */
828 if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
829 page_req = VM_ALLOC_SYSTEM;
 830	}
831
832 s = splvm();
833
834loop:
835 if (cnt.v_free_count > cnt.v_free_reserved) {
836 /*
837 * Allocate from the free queue if there are plenty of pages
838 * in it.
839 */
840 if (page_req == VM_ALLOC_ZERO)
841 m = vm_page_select_free(object, pindex, TRUE);
842 else
843 m = vm_page_select_free(object, pindex, FALSE);
844 } else if (
845 (page_req == VM_ALLOC_SYSTEM &&
846 cnt.v_cache_count == 0 &&
847 cnt.v_free_count > cnt.v_interrupt_free_min) ||
848 (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
849 ) {
850 /*
851 * Interrupt or system, dig deeper into the free list.
852 */
853 m = vm_page_select_free(object, pindex, FALSE);
854 } else if (page_req != VM_ALLOC_INTERRUPT) {
855 /*
856 * Allocatable from cache (non-interrupt only). On success,
857 * we must free the page and try again, thus ensuring that
858 * cnt.v_*_free_min counters are replenished.
859 */
860 m = vm_page_select_cache(object, pindex);
861 if (m == NULL) {
862 splx(s);
863#if defined(DIAGNOSTIC)
864 if (cnt.v_cache_count > 0)
865 printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
866#endif
867 vm_pageout_deficit++;
868 pagedaemon_wakeup();
869 return (NULL);
870 }
871 KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
872 vm_page_busy(m);
873 vm_page_protect(m, VM_PROT_NONE);
874 vm_page_free(m);
875 goto loop;
876 } else {
877 /*
878 * Not allocatable from cache from interrupt, give up.
879 */
880 splx(s);
881 vm_pageout_deficit++;
882 pagedaemon_wakeup();
883 return (NULL);
884 }
885
886 /*
887 * At this point we had better have found a good page.
888 */
889
890 KASSERT(
891 m != NULL,
892 ("vm_page_alloc(): missing page on free queue\n")
893 );
894
895 /*
896 * Remove from free queue
897 */
898
899 vm_pageq_remove_nowakeup(m);
900
901 /*
902 * Initialize structure. Only the PG_ZERO flag is inherited.
903 */
904 if (m->flags & PG_ZERO) {
905 vm_page_zero_count--;
906 m->flags = PG_ZERO | PG_BUSY;
907 } else {
908 m->flags = PG_BUSY;
909 }
910 m->wire_count = 0;
911 m->hold_count = 0;
912 m->act_count = 0;
913 m->busy = 0;
914 m->valid = 0;
915 KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
916
917 /*
918 * vm_page_insert() is safe prior to the splx(). Note also that
919 * inserting a page here does not insert it into the pmap (which
920 * could cause us to block allocating memory). We cannot block
921 * anywhere.
922 */
923 vm_page_insert(m, object, pindex);
924
925 /*
926 * Don't wakeup too often - wakeup the pageout daemon when
927 * we would be nearly out of memory.
928 */
929 if (vm_paging_needed())
930 pagedaemon_wakeup();
931
932 splx(s);
933 return (m);
934}
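#if 0
/*
 * Illustrative sketch only: allocate a page for an object/offset pair,
 * blocking (via VM_WAIT) until the page daemon frees memory.  The helper
 * name is hypothetical; Giant must be held as required above.
 */
static vm_page_t
example_alloc_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
		VM_WAIT;
	return (m);
}
#endif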
935
936/*
937 * vm_wait: (also see VM_WAIT macro)
938 *
939 * Block until free pages are available for allocation
940 * - Called in various places before memory allocations.
941 */
942void
943vm_wait(void)
944{
945 int s;
946
947 s = splvm();
948 if (curproc == pageproc) {
949 vm_pageout_pages_needed = 1;
950 tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
951 } else {
952 if (!vm_pages_needed) {
953 vm_pages_needed = 1;
954 wakeup(&vm_pages_needed);
955 }
956 tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
957 }
958 splx(s);
959}
960
961/*
962 * vm_waitpfault: (also see VM_WAITPFAULT macro)
963 *
964 * Block until free pages are available for allocation
965 * - Called only in vm_fault so that processes page faulting
966 * can be easily tracked.
967 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
968 * processes will be able to grab memory first. Do not change
969 * this balance without careful testing first.
970 */
971void
972vm_waitpfault(void)
973{
974 int s;
975
976 s = splvm();
977 if (!vm_pages_needed) {
978 vm_pages_needed = 1;
979 wakeup(&vm_pages_needed);
980 }
981 tsleep(&cnt.v_free_count, PUSER, "pfault", 0);
982 splx(s);
983}
984
985/*
986 * vm_page_activate:
987 *
988 * Put the specified page on the active list (if appropriate).
989 * Ensure that act_count is at least ACT_INIT but do not otherwise
990 * mess with it.
991 *
992 * The page queues must be locked.
993 * This routine may not block.
994 */
995void
996vm_page_activate(vm_page_t m)
997{
998 int s;
999
1000 GIANT_REQUIRED;
1001 s = splvm();
1002 if (m->queue != PQ_ACTIVE) {
1003 if ((m->queue - m->pc) == PQ_CACHE)
1004 cnt.v_reactivated++;
1005 vm_pageq_remove(m);
1006 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1007 if (m->act_count < ACT_INIT)
1008 m->act_count = ACT_INIT;
1009 vm_pageq_enqueue(PQ_ACTIVE, m);
1010 }
1011 } else {
1012 if (m->act_count < ACT_INIT)
1013 m->act_count = ACT_INIT;
1014 }
1015 splx(s);
1016}
1017
1018/*
1019 * vm_page_free_wakeup:
1020 *
1021 * Helper routine for vm_page_free_toq() and vm_page_cache(). This
1022 * routine is called when a page has been added to the cache or free
1023 * queues.
1024 *
1025 * This routine may not block.
1026 * This routine must be called at splvm()
1027 */
1028static __inline void
1029vm_page_free_wakeup(void)
1030{
1031 /*
1032 * if pageout daemon needs pages, then tell it that there are
1033 * some free.
1034 */
1035 if (vm_pageout_pages_needed &&
1036 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1037 wakeup(&vm_pageout_pages_needed);
1038 vm_pageout_pages_needed = 0;
1039 }
1040 /*
 1041	 * Wake up processes that are waiting on memory if we hit a
 1042	 * high water mark, and wake up the scheduler process if we have
 1043	 * lots of memory; that process will swap processes back in.
1044 */
1045 if (vm_pages_needed && !vm_page_count_min()) {
1046 vm_pages_needed = 0;
1047 wakeup(&cnt.v_free_count);
1048 }
1049}
1050
1051/*
1052 * vm_page_free_toq:
1053 *
1054 * Returns the given page to the PQ_FREE list,
1055 * disassociating it with any VM object.
1056 *
1057 * Object and page must be locked prior to entry.
1058 * This routine may not block.
1059 */
1060
1061void
1062vm_page_free_toq(vm_page_t m)
1063{
1064 int s;
1065 struct vpgqueues *pq;
1066 vm_object_t object = m->object;
1067
1068 GIANT_REQUIRED;
1069 s = splvm();
1070 cnt.v_tfree++;
1071
1072 if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
1073 printf(
1074 "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
1075 (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
1076 m->hold_count);
1077 if ((m->queue - m->pc) == PQ_FREE)
1078 panic("vm_page_free: freeing free page");
1079 else
1080 panic("vm_page_free: freeing busy page");
1081 }
1082
1083 /*
1084 * unqueue, then remove page. Note that we cannot destroy
1085 * the page here because we do not want to call the pager's
1086 * callback routine until after we've put the page on the
1087 * appropriate free queue.
1088 */
1089 vm_pageq_remove_nowakeup(m);
1090 vm_page_remove(m);
1091
1092 /*
1093 * If fictitious remove object association and
1094 * return, otherwise delay object association removal.
1095 */
1096 if ((m->flags & PG_FICTITIOUS) != 0) {
1097 splx(s);
1098 return;
1099 }
1100
1101 m->valid = 0;
1102 vm_page_undirty(m);
1103
1104 if (m->wire_count != 0) {
1105 if (m->wire_count > 1) {
1106 panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
1107 m->wire_count, (long)m->pindex);
1108 }
1109 panic("vm_page_free: freeing wired page\n");
1110 }
1111
1112 /*
1113 * If we've exhausted the object's resident pages we want to free
1114 * it up.
1115 */
1116 if (object &&
1117 (object->type == OBJT_VNODE) &&
1118 ((object->flags & OBJ_DEAD) == 0)
1119 ) {
1120 struct vnode *vp = (struct vnode *)object->handle;
1121
1122 if (vp && VSHOULDFREE(vp))
1123 vfree(vp);
1124 }
1125
1126 /*
1127 * Clear the UNMANAGED flag when freeing an unmanaged page.
1128 */
1129 if (m->flags & PG_UNMANAGED) {
1130 m->flags &= ~PG_UNMANAGED;
1131 } else {
1132#ifdef __alpha__
1133 pmap_page_is_free(m);
1134#endif
1135 }
1136
1137 if (m->hold_count != 0) {
1138 m->flags &= ~PG_ZERO;
1139 m->queue = PQ_HOLD;
1140 } else
1141 m->queue = PQ_FREE + m->pc;
1142 pq = &vm_page_queues[m->queue];
1143 pq->lcnt++;
1144 ++(*pq->cnt);
1145
1146 /*
 1147	 * Put zeroed pages at the tail (where we look for zeroed pages
 1148	 * first) and non-zeroed pages at the head.
1149 */
1150 if (m->flags & PG_ZERO) {
1151 TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
1152 ++vm_page_zero_count;
1153 } else {
1154 TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
1155 }
1156 vm_page_free_wakeup();
1157 splx(s);
1158}
1159
1160/*
1161 * vm_page_unmanage:
1162 *
1163 * Prevent PV management from being done on the page. The page is
1164 * removed from the paging queues as if it were wired, and as a
1165 * consequence of no longer being managed the pageout daemon will not
1166 * touch it (since there is no way to locate the pte mappings for the
1167 * page). madvise() calls that mess with the pmap will also no longer
1168 * operate on the page.
1169 *
1170 * Beyond that the page is still reasonably 'normal'. Freeing the page
1171 * will clear the flag.
1172 *
1173 * This routine is used by OBJT_PHYS objects - objects using unswappable
 1174 * physical memory as backing store rather than swap-backed memory, and
1175 * will eventually be extended to support 4MB unmanaged physical
1176 * mappings.
1177 */
1178void
1179vm_page_unmanage(vm_page_t m)
1180{
1181 int s;
1182
1183 s = splvm();
1184 if ((m->flags & PG_UNMANAGED) == 0) {
1185 if (m->wire_count == 0)
1186 vm_pageq_remove(m);
1187 }
1188 vm_page_flag_set(m, PG_UNMANAGED);
1189 splx(s);
1190}
1191
1192/*
1193 * vm_page_wire:
1194 *
1195 * Mark this page as wired down by yet
1196 * another map, removing it from paging queues
1197 * as necessary.
1198 *
1199 * The page queues must be locked.
1200 * This routine may not block.
1201 */
1202void
1203vm_page_wire(vm_page_t m)
1204{
1205 int s;
1206
1207 /*
1208 * Only bump the wire statistics if the page is not already wired,
1209 * and only unqueue the page if it is on some queue (if it is unmanaged
1210 * it is already off the queues).
1211 */
1212 s = splvm();
1213 if (m->wire_count == 0) {
1214 if ((m->flags & PG_UNMANAGED) == 0)
1215 vm_pageq_remove(m);
1216 cnt.v_wire_count++;
1217 }
1218 m->wire_count++;
1219 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
1220 splx(s);
1221 vm_page_flag_set(m, PG_MAPPED);
1222}
1223
1224/*
1225 * vm_page_unwire:
1226 *
1227 * Release one wiring of this page, potentially
1228 * enabling it to be paged again.
1229 *
1230 * Many pages placed on the inactive queue should actually go
1231 * into the cache, but it is difficult to figure out which. What
1232 * we do instead, if the inactive target is well met, is to put
1233 * clean pages at the head of the inactive queue instead of the tail.
1234 * This will cause them to be moved to the cache more quickly and
1235 * if not actively re-referenced, freed more quickly. If we just
1236 * stick these pages at the end of the inactive queue, heavy filesystem
1237 * meta-data accesses can cause an unnecessary paging load on memory bound
1238 * processes. This optimization causes one-time-use metadata to be
1239 * reused more quickly.
1240 *
1241 * BUT, if we are in a low-memory situation we have no choice but to
1242 * put clean pages on the cache queue.
1243 *
1244 * A number of routines use vm_page_unwire() to guarantee that the page
1245 * will go into either the inactive or active queues, and will NEVER
1246 * be placed in the cache - for example, just after dirtying a page.
1247 * dirty pages in the cache are not allowed.
1248 *
1249 * The page queues must be locked.
1250 * This routine may not block.
1251 */
1252void
1253vm_page_unwire(vm_page_t m, int activate)
1254{
1255 int s;
1256
1257 s = splvm();
1258
1259 if (m->wire_count > 0) {
1260 m->wire_count--;
1261 if (m->wire_count == 0) {
1262 cnt.v_wire_count--;
1263 if (m->flags & PG_UNMANAGED) {
1264 ;
1265 } else if (activate)
1266 vm_pageq_enqueue(PQ_ACTIVE, m);
1267 else {
1268 vm_page_flag_clear(m, PG_WINATCFLS);
1269 vm_pageq_enqueue(PQ_INACTIVE, m);
1270 }
1271 }
1272 } else {
1273 panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
1274 }
1275 splx(s);
1276}
1277
1278
1279/*
1280 * Move the specified page to the inactive queue. If the page has
1281 * any associated swap, the swap is deallocated.
1282 *
1283 * Normally athead is 0 resulting in LRU operation. athead is set
1284 * to 1 if we want this page to be 'as if it were placed in the cache',
1285 * except without unmapping it from the process address space.
1286 *
1287 * This routine may not block.
1288 */
1289static __inline void
1290_vm_page_deactivate(vm_page_t m, int athead)
1291{
1292 int s;
1293
1294 GIANT_REQUIRED;
1295 /*
1296 * Ignore if already inactive.
1297 */
1298 if (m->queue == PQ_INACTIVE)
1299 return;
1300
1301 s = splvm();
1302 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1303 if ((m->queue - m->pc) == PQ_CACHE)
1304 cnt.v_reactivated++;
1305 vm_page_flag_clear(m, PG_WINATCFLS);
1306 vm_pageq_remove(m);
1307 if (athead)
1308 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1309 else
1310 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1311 m->queue = PQ_INACTIVE;
1312 vm_page_queues[PQ_INACTIVE].lcnt++;
1313 cnt.v_inactive_count++;
1314 }
1315 splx(s);
1316}
1317
1318void
1319vm_page_deactivate(vm_page_t m)
1320{
1321 _vm_page_deactivate(m, 0);
1322}
1323
1324/*
1325 * vm_page_try_to_cache:
1326 *
1327 * Returns 0 on failure, 1 on success
1328 */
1329int
1330vm_page_try_to_cache(vm_page_t m)
1331{
1332 GIANT_REQUIRED;
1333
1334 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1335 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1336 return (0);
1337 }
1338 vm_page_test_dirty(m);
1339 if (m->dirty)
1340 return (0);
1341 vm_page_cache(m);
1342 return (1);
1343}
1344
1345/*
1346 * vm_page_try_to_free()
1347 *
1348 * Attempt to free the page. If we cannot free it, we do nothing.
1349 * 1 is returned on success, 0 on failure.
1350 */
1351int
1352vm_page_try_to_free(vm_page_t m)
1353{
1354 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1355 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1356 return (0);
1357 }
1358 vm_page_test_dirty(m);
1359 if (m->dirty)
1360 return (0);
1361 vm_page_busy(m);
1362 vm_page_protect(m, VM_PROT_NONE);
1363 vm_page_free(m);
1364 return (1);
1365}
1366
1367/*
1368 * vm_page_cache
1369 *
1370 * Put the specified page onto the page cache queue (if appropriate).
1371 *
1372 * This routine may not block.
1373 */
1374void
1375vm_page_cache(vm_page_t m)
1376{
1377 int s;
1378
1379 GIANT_REQUIRED;
1380 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
1381 printf("vm_page_cache: attempting to cache busy page\n");
1382 return;
1383 }
1384 if ((m->queue - m->pc) == PQ_CACHE)
1385 return;
1386
1387 /*
1388 * Remove all pmaps and indicate that the page is not
1389 * writeable or mapped.
1390 */
1391 vm_page_protect(m, VM_PROT_NONE);
1392 if (m->dirty != 0) {
1393 panic("vm_page_cache: caching a dirty page, pindex: %ld",
1394 (long)m->pindex);
1395 }
1396 s = splvm();
1397 vm_pageq_remove_nowakeup(m);
1398 vm_pageq_enqueue(PQ_CACHE + m->pc, m);
1399 vm_page_free_wakeup();
1400 splx(s);
1401}
1402
1403/*
1404 * vm_page_dontneed
1405 *
1406 * Cache, deactivate, or do nothing as appropriate. This routine
1407 * is typically used by madvise() MADV_DONTNEED.
1408 *
1409 * Generally speaking we want to move the page into the cache so
1410 * it gets reused quickly. However, this can result in a silly syndrome
1411 * due to the page recycling too quickly. Small objects will not be
 1412 * fully cached.  On the other hand, if we move the page to the inactive
1413 * queue we wind up with a problem whereby very large objects
1414 * unnecessarily blow away our inactive and cache queues.
1415 *
1416 * The solution is to move the pages based on a fixed weighting. We
1417 * either leave them alone, deactivate them, or move them to the cache,
1418 * where moving them to the cache has the highest weighting.
1419 * By forcing some pages into other queues we eventually force the
1420 * system to balance the queues, potentially recovering other unrelated
1421 * space from active. The idea is to not force this to happen too
1422 * often.
1423 */
1424void
1425vm_page_dontneed(vm_page_t m)
1426{
1427 static int dnweight;
1428 int dnw;
1429 int head;
1430
1431 GIANT_REQUIRED;
1432 dnw = ++dnweight;
1433
1434 /*
 1435	 * occasionally leave the page alone
1436 */
1437 if ((dnw & 0x01F0) == 0 ||
1438 m->queue == PQ_INACTIVE ||
1439 m->queue - m->pc == PQ_CACHE
1440 ) {
1441 if (m->act_count >= ACT_INIT)
1442 --m->act_count;
1443 return;
1444 }
1445
1446 if (m->dirty == 0)
1447 vm_page_test_dirty(m);
1448
1449 if (m->dirty || (dnw & 0x0070) == 0) {
1450 /*
1451 * Deactivate the page 3 times out of 32.
1452 */
1453 head = 0;
1454 } else {
1455 /*
1456 * Cache the page 28 times out of every 32. Note that
1457 * the page is deactivated instead of cached, but placed
1458 * at the head of the queue instead of the tail.
1459 */
1460 head = 1;
1461 }
1462 _vm_page_deactivate(m, head);
1463}
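/*
 * Why the masks above give the stated ratios: dnw is a free-running
 * counter, so (dnw & 0x01F0) == 0 holds for 16 of every 512 values
 * (1/32 of calls, which leave the page alone), and (dnw & 0x0070) == 0
 * holds for 16 of every 128 values (4/32); excluding the 1/32 already
 * handled leaves the 3-in-32 deactivations, and the remaining 28 of 32
 * calls place the page at the head of the inactive queue.
 */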
1464
1465/*
 1466 * Grab a page, waiting until we are woken up due to the page
 1467 * changing state.  We keep waiting as long as the page remains
 1468 * in the object.  If the page doesn't exist, allocate it.
1469 *
1470 * This routine may block.
1471 */
1472vm_page_t
1473vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1474{
1475 vm_page_t m;
1476 int s, generation;
1477
1478 GIANT_REQUIRED;
1479retrylookup:
1480 if ((m = vm_page_lookup(object, pindex)) != NULL) {
1481 if (m->busy || (m->flags & PG_BUSY)) {
1482 generation = object->generation;
1483
1484 s = splvm();
1485 while ((object->generation == generation) &&
1486 (m->busy || (m->flags & PG_BUSY))) {
1487 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1488 tsleep(m, PVM, "pgrbwt", 0);
1489 if ((allocflags & VM_ALLOC_RETRY) == 0) {
1490 splx(s);
1491 return NULL;
1492 }
1493 }
1494 splx(s);
1495 goto retrylookup;
1496 } else {
1497 vm_page_busy(m);
1498 return m;
1499 }
1500 }
1501
1502 m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1503 if (m == NULL) {
1504 VM_WAIT;
1505 if ((allocflags & VM_ALLOC_RETRY) == 0)
1506 return NULL;
1507 goto retrylookup;
1508 }
1509
1510 return m;
1511}
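/*
 * A typical caller pattern (sketch; the flag combination is the common
 * idiom, not the only one): with VM_ALLOC_RETRY the routine does not
 * return NULL, and the page comes back busied.
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	...
 *	vm_page_wakeup(m);
 */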
1512
1513/*
1514 * Mapping function for valid bits or for dirty bits in
1515 * a page. May not block.
1516 *
1517 * Inputs are required to range within a page.
1518 */
1519__inline int
1520vm_page_bits(int base, int size)
1521{
1522 int first_bit;
1523 int last_bit;
1524
1525 KASSERT(
1526 base + size <= PAGE_SIZE,
1527 ("vm_page_bits: illegal base/size %d/%d", base, size)
1528 );
1529
1530 if (size == 0) /* handle degenerate case */
1531 return (0);
1532
1533 first_bit = base >> DEV_BSHIFT;
1534 last_bit = (base + size - 1) >> DEV_BSHIFT;
1535
1536 return ((2 << last_bit) - (1 << first_bit));
1537}
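/*
 * Worked example, assuming DEV_BSIZE is 512 (DEV_BSHIFT 9): base 512 and
 * size 1024 give first_bit 1 and last_bit 2, so the result is
 * (2 << 2) - (1 << 1) = 8 - 2 = 6, i.e. the bits for chunks 1 and 2.
 */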
1538
1539/*
1540 * vm_page_set_validclean:
1541 *
1542 * Sets portions of a page valid and clean. The arguments are expected
1543 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
1544 * of any partial chunks touched by the range. The invalid portion of
1545 * such chunks will be zero'd.
1546 *
1547 * This routine may not block.
1548 *
 1549 * (base + size) must be less than or equal to PAGE_SIZE.
1550 */
1551void
1552vm_page_set_validclean(vm_page_t m, int base, int size)
1553{
1554 int pagebits;
1555 int frag;
1556 int endoff;
1557
1558 GIANT_REQUIRED;
1559 if (size == 0) /* handle degenerate case */
1560 return;
1561
1562 /*
1563 * If the base is not DEV_BSIZE aligned and the valid
1564 * bit is clear, we have to zero out a portion of the
1565 * first block.
1566 */
1567 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1568 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
1569 ) {
1570 pmap_zero_page_area(
1571 VM_PAGE_TO_PHYS(m),
1572 frag,
1573 base - frag
1574 );
1575 }
1576
1577 /*
1578 * If the ending offset is not DEV_BSIZE aligned and the
1579 * valid bit is clear, we have to zero out a portion of
1580 * the last block.
1581 */
1582 endoff = base + size;
1583 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1584 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
1585 ) {
1586 pmap_zero_page_area(
1587 VM_PAGE_TO_PHYS(m),
1588 endoff,
1589 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
1590 );
1591 }
1592
1593 /*
1594 * Set valid, clear dirty bits. If validating the entire
1595 * page we can safely clear the pmap modify bit. We also
1596 * use this opportunity to clear the PG_NOSYNC flag. If a process
1597 * takes a write fault on a MAP_NOSYNC memory area the flag will
1598 * be set again.
1599 *
1600 * We set valid bits inclusive of any overlap, but we can only
1601 * clear dirty bits for DEV_BSIZE chunks that are fully within
1602 * the range.
1603 */
1604 pagebits = vm_page_bits(base, size);
1605 m->valid |= pagebits;
1606#if 0 /* NOT YET */
1607 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
1608 frag = DEV_BSIZE - frag;
1609 base += frag;
1610 size -= frag;
1611 if (size < 0)
1612 size = 0;
1613 }
1614 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
1615#endif
1616 m->dirty &= ~pagebits;
1617 if (base == 0 && size == PAGE_SIZE) {
1618 pmap_clear_modify(m);
1619 vm_page_flag_clear(m, PG_NOSYNC);
1620 }
1621}
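/*
 * Worked example of the alignment handling above, assuming DEV_BSIZE is
 * 512: for base 100 and size 1000, frag = 100 & ~511 = 0, so bytes 0..99
 * of the first chunk are zeroed if its valid bit is clear; endoff = 1100,
 * frag = 1100 & ~511 = 1024, so bytes 1100..1535 of the last chunk are
 * zeroed if its valid bit is clear.
 */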
1622
1623#if 0
1624
1625void
1626vm_page_set_dirty(vm_page_t m, int base, int size)
1627{
1628 m->dirty |= vm_page_bits(base, size);
1629}
1630
1631#endif
1632
1633void
1634vm_page_clear_dirty(vm_page_t m, int base, int size)
1635{
1636 GIANT_REQUIRED;
1637 m->dirty &= ~vm_page_bits(base, size);
1638}
1639
1640/*
1641 * vm_page_set_invalid:
1642 *
1643 * Invalidates DEV_BSIZE'd chunks within a page. Both the
 1644 * valid and dirty bits for the affected areas are cleared.
1645 *
1646 * May not block.
1647 */
1648void
1649vm_page_set_invalid(vm_page_t m, int base, int size)
1650{
1651 int bits;
1652
1653 GIANT_REQUIRED;
1654 bits = vm_page_bits(base, size);
1655 m->valid &= ~bits;
1656 m->dirty &= ~bits;
1657 m->object->generation++;
1658}
1659
1660/*
1661 * vm_page_zero_invalid()
1662 *
1663 * The kernel assumes that the invalid portions of a page contain
1664 * garbage, but such pages can be mapped into memory by user code.
1665 * When this occurs, we must zero out the non-valid portions of the
1666 * page so user code sees what it expects.
1667 *
1668 * Pages are most often semi-valid when the end of a file is mapped
1669 * into memory and the file's size is not page aligned.
1670 */
1671void
1672vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1673{
1674 int b;
1675 int i;
1676
1677 /*
1678 * Scan the valid bits looking for invalid sections that
 1679	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
 1680	 * valid bit may be set) have already been zeroed by
1681 * vm_page_set_validclean().
1682 */
1683 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1684 if (i == (PAGE_SIZE / DEV_BSIZE) ||
1685 (m->valid & (1 << i))
1686 ) {
1687 if (i > b) {
1688 pmap_zero_page_area(
1689 VM_PAGE_TO_PHYS(m),
1690 b << DEV_BSHIFT,
1691 (i - b) << DEV_BSHIFT
1692 );
1693 }
1694 b = i + 1;
1695 }
1696 }
1697
1698 /*
1699 * setvalid is TRUE when we can safely set the zero'd areas
 1700	 * as being valid.  We can do this if there are no cache consistency
 1701	 * issues, e.g. it is OK to do with UFS, but not OK to do with NFS.
1702 */
1703 if (setvalid)
1704 m->valid = VM_PAGE_BITS_ALL;
1705}
1706
1707/*
1708 * vm_page_is_valid:
1709 *
1710 * Is (partial) page valid? Note that the case where size == 0
1711 * will return FALSE in the degenerate case where the page is
1712 * entirely invalid, and TRUE otherwise.
1713 *
1714 * May not block.
1715 */
1716int
1717vm_page_is_valid(vm_page_t m, int base, int size)
1718{
1719 int bits = vm_page_bits(base, size);
1720
1721 if (m->valid && ((m->valid & bits) == bits))
1722 return 1;
1723 else
1724 return 0;
1725}
1726
1727/*
1728 * update dirty bits from pmap/mmu. May not block.
1729 */
1730void
1731vm_page_test_dirty(vm_page_t m)
1732{
1733 if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
1734 vm_page_dirty(m);
1735 }
1736}
1737
1738#include "opt_ddb.h"
1739#ifdef DDB
1740#include <sys/kernel.h>
1741
1742#include <ddb/ddb.h>
1743
1744DB_SHOW_COMMAND(page, vm_page_print_page_info)
1745{
1746 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1747 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1748 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1749 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1750 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1751 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1752 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1753 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1754 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1755 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1756}
1757
1758DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1759{
1760 int i;
1761 db_printf("PQ_FREE:");
1762 for (i = 0; i < PQ_L2_SIZE; i++) {
1763 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
1764 }
1765 db_printf("\n");
1766
1767 db_printf("PQ_CACHE:");
1768 for (i = 0; i < PQ_L2_SIZE; i++) {
1769 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
1770 }
1771 db_printf("\n");
1772
1773 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1774 vm_page_queues[PQ_ACTIVE].lcnt,
1775 vm_page_queues[PQ_INACTIVE].lcnt);
1776}
1777#endif /* DDB */
227 * Allocate (and initialize) the hash table buckets.
228 *
229 * The number of buckets MUST BE a power of 2, and the actual value is
230 * the next power of 2 greater than the number of physical pages in
231 * the system.
232 *
233 * We make the hash table approximately 2x the number of pages to
234 * reduce the chain length. With singly-linked buckets this is about
235 * the same size as the 1x TAILQ-based hash table we were using
236 * before, but the chain lengths will be smaller.
237 *
238 * Note: This computation can be tweaked if desired.
239 */
240 if (vm_page_bucket_count == 0) {
241 vm_page_bucket_count = 1;
242 while (vm_page_bucket_count < atop(total))
243 vm_page_bucket_count <<= 1;
244 }
245 vm_page_bucket_count <<= 1;
246 vm_page_hash_mask = vm_page_bucket_count - 1;
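	/*
	 * Worked example (illustrative, not part of the original source):
	 * assuming 4K pages and 256MB of physical memory, atop(total) is
	 * 65536, so the loop above stops at 65536 buckets; the extra shift
	 * then doubles that to 131072 buckets and vm_page_hash_mask
	 * becomes 0x1ffff.
	 */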
247
248 /*
249 * Allocate and map space for the hash buckets, then zero it.
250 */
251 new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
252 new_end = trunc_page(new_end);
253 mapped = pmap_map(&vaddr, new_end, end,
254 VM_PROT_READ | VM_PROT_WRITE);
255 bzero((caddr_t) mapped, end - new_end);
256
257 vm_page_buckets = (struct vm_page **)mapped;
258 bucket = vm_page_buckets;
259 for (i = 0; i < vm_page_bucket_count; i++) {
260 *bucket = NULL;
261 bucket++;
262 }
263 for (i = 0; i < BUCKET_HASH_SIZE; ++i)
264 mtx_init(&vm_buckets_mtx[i], "vm buckets hash mutexes", MTX_DEF);
265
266 /*
267 * Compute the number of pages of memory that will be available for
268 * use (taking into account the overhead of a page structure per
269 * page).
270 */
271 first_page = phys_avail[0] / PAGE_SIZE;
272 page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
273 npages = (total - (page_range * sizeof(struct vm_page)) -
274 (end - new_end)) / PAGE_SIZE;
275 end = new_end;
276
277 /*
278 * Initialize the mem entry structures now, and put them in the free
279 * queue.
280 */
281 new_end = trunc_page(end - page_range * sizeof(struct vm_page));
282 mapped = pmap_map(&vaddr, new_end, end,
283 VM_PROT_READ | VM_PROT_WRITE);
284 vm_page_array = (vm_page_t) mapped;
285
286 /*
287 * Clear all of the page structures
288 */
289 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
290 vm_page_array_size = page_range;
291
292 /*
293 * Construct the free queue(s) in descending order (by physical
294 * address) so that the first 16MB of physical memory is allocated
295 * last rather than first. On large-memory machines, this avoids
296 * the exhaustion of low physical memory before isa_dmainit has run.
297 */
298 cnt.v_page_count = 0;
299 cnt.v_free_count = 0;
300 for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
301 pa = phys_avail[i];
302 if (i == biggestone)
303 last_pa = new_end;
304 else
305 last_pa = phys_avail[i + 1];
306 while (pa < last_pa && npages-- > 0) {
307 vm_pageq_add_new_page(pa);
308 pa += PAGE_SIZE;
309 }
310 }
311 return (vaddr);
312}
313
314/*
315 * vm_page_hash:
316 *
317 * Distributes the object/offset key pair among hash buckets.
318 *
319 * NOTE: This routine depends on vm_page_bucket_count being a power of 2.
320 * This routine may not block.
321 *
322 * We try to randomize the hash based on the object to spread the pages
323 * out in the hash table without it costing us too much.
324 */
325static __inline int
326vm_page_hash(vm_object_t object, vm_pindex_t pindex)
327{
328 int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
329
330 return (i & vm_page_hash_mask);
331}
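
/*
 * Worked example (illustrative, not part of the original file):
 * vm_page_insert(), vm_page_lookup() and vm_page_remove() below all
 * locate a page's hash chain with
 *
 *	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
 *
 * and because vm_page_hash_mask is a power of 2 minus 1, the mask in
 * vm_page_hash() always yields a valid bucket index.
 */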
332
333void
334vm_page_flag_set(vm_page_t m, unsigned short bits)
335{
336 GIANT_REQUIRED;
337 m->flags |= bits;
338}
339
340void
341vm_page_flag_clear(vm_page_t m, unsigned short bits)
342{
343 GIANT_REQUIRED;
344 m->flags &= ~bits;
345}
346
347void
348vm_page_busy(vm_page_t m)
349{
350 KASSERT((m->flags & PG_BUSY) == 0,
351 ("vm_page_busy: page already busy!!!"));
352 vm_page_flag_set(m, PG_BUSY);
353}
354
355/*
356 * vm_page_flash:
357 *
358 * wakeup anyone waiting for the page.
359 */
360void
361vm_page_flash(vm_page_t m)
362{
363 if (m->flags & PG_WANTED) {
364 vm_page_flag_clear(m, PG_WANTED);
365 wakeup(m);
366 }
367}
368
369/*
370 * vm_page_wakeup:
371 *
372 * clear the PG_BUSY flag and wakeup anyone waiting for the
373 * page.
374 *
375 */
376void
377vm_page_wakeup(vm_page_t m)
378{
379 KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
380 vm_page_flag_clear(m, PG_BUSY);
381 vm_page_flash(m);
382}
383
384/*
385 * vm_page_io_start() and vm_page_io_finish() bracket I/O on a page
386 * by adjusting its soft-busy count (m->busy).
387 */
388void
389vm_page_io_start(vm_page_t m)
390{
391 GIANT_REQUIRED;
392 m->busy++;
393}
394
395void
396vm_page_io_finish(vm_page_t m)
397{
398 GIANT_REQUIRED;
399 m->busy--;
400 if (m->busy == 0)
401 vm_page_flash(m);
402}
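
/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * perform I/O on a page typically bracket it as
 *
 *	vm_page_io_start(m);
 *	(issue and complete the I/O)
 *	vm_page_io_finish(m);
 *
 * so that anyone sleeping in vm_page_sleep_busy(m, TRUE, ...) is woken
 * up once m->busy drops back to zero.
 */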
403
404/*
405 * Keep the page from being freed by the page daemon. This has
406 * much the same effect as wiring, but with much lower overhead,
407 * and should be used only for *very* temporary
408 * holding ("wiring").
409 */
410void
411vm_page_hold(vm_page_t mem)
412{
413 GIANT_REQUIRED;
414 mem->hold_count++;
415}
416
417void
418vm_page_unhold(vm_page_t mem)
419{
420 GIANT_REQUIRED;
421 --mem->hold_count;
422 KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
423 if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
424 vm_page_free_toq(mem);
425}
426
427/*
428 * vm_page_protect:
429 *
430 * Reduce the protection of a page. This routine never raises the
431 * protection and therefore can be safely called if the page is already
432 * at VM_PROT_NONE (it will effectively be a NOP).
433 */
434void
435vm_page_protect(vm_page_t mem, int prot)
436{
437 if (prot == VM_PROT_NONE) {
438 if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
439 pmap_page_protect(mem, VM_PROT_NONE);
440 vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
441 }
442 } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
443 pmap_page_protect(mem, VM_PROT_READ);
444 vm_page_flag_clear(mem, PG_WRITEABLE);
445 }
446}
447/*
448 * vm_page_zero_fill:
449 *
450 * Zero-fill the specified page.
451 * Written as a standard pagein routine, to
452 * be used by the zero-fill object.
453 */
454boolean_t
455vm_page_zero_fill(vm_page_t m)
456{
457 pmap_zero_page(VM_PAGE_TO_PHYS(m));
458 return (TRUE);
459}
460
461/*
462 * vm_page_copy:
463 *
464 * Copy one page to another
465 */
466void
467vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
468{
469 pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
470 dest_m->valid = VM_PAGE_BITS_ALL;
471}
472
473/*
474 * vm_page_free:
475 *
476 * Free a page
477 *
478 * The clearing of PG_ZERO is a temporary safety until the code can be
479 * reviewed to determine that PG_ZERO is being properly cleared on
480 * write faults or maps. PG_ZERO was previously cleared in
481 * vm_page_alloc().
482 */
483void
484vm_page_free(vm_page_t m)
485{
486 vm_page_flag_clear(m, PG_ZERO);
487 vm_page_free_toq(m);
488 vm_page_zero_idle_wakeup();
489}
490
491/*
492 * vm_page_free_zero:
493 *
494 * Free a page to the zerod-pages queue
495 */
496void
497vm_page_free_zero(vm_page_t m)
498{
499 vm_page_flag_set(m, PG_ZERO);
500 vm_page_free_toq(m);
501}
502
503/*
504 * vm_page_sleep_busy:
505 *
506 * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
507 * m->busy is zero. Returns TRUE if it had to sleep ( including if
508 * it almost had to sleep and made temporary spl*() mods), FALSE
509 * otherwise.
510 *
511 * This routine assumes that interrupts can only remove the busy
512 * status from a page, not set the busy status or change it from
513 * PG_BUSY to m->busy or vice versa (which would create a timing
514 * window).
515 */
516int
517vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
518{
519 GIANT_REQUIRED;
520 if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
521 int s = splvm();
522 if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
523 /*
524 * Page is busy. Wait and retry.
525 */
526 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
527 tsleep(m, PVM, msg, 0);
528 }
529 splx(s);
530 return (TRUE);
531 /* not reached */
532 }
533 return (FALSE);
534}
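
/*
 * Usage sketch (illustrative, not part of the original file): because the
 * page may have changed identity while we slept, callers typically
 * re-look the page up and retry when TRUE is returned, e.g.
 *
 *	m = vm_page_lookup(object, pindex);
 *	if (m != NULL && vm_page_sleep_busy(m, TRUE, "mywait"))
 *		goto relookup;
 */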
535/*
536 * vm_page_dirty:
537 *
538 * make page all dirty
539 */
540void
541vm_page_dirty(vm_page_t m)
542{
543 KASSERT(m->queue - m->pc != PQ_CACHE,
544 ("vm_page_dirty: page in cache!"));
545 m->dirty = VM_PAGE_BITS_ALL;
546}
547
548/*
549 * vm_page_undirty:
550 *
551 * Set page to not be dirty. Note: does not clear pmap modify bits
552 */
553void
554vm_page_undirty(vm_page_t m)
555{
556 m->dirty = 0;
557}
558
559/*
560 * vm_page_insert: [ internal use only ]
561 *
562 * Inserts the given mem entry into the object and object list.
563 *
564 * The pagetables are not updated but will presumably fault the page
565 * in if necessary, or if a kernel page the caller will at some point
566 * enter the page into the kernel's pmap. We are not allowed to block
567 * here so we *can't* do this anyway.
568 *
569 * The object and page must be locked, and must be splhigh.
570 * This routine may not block.
571 */
572void
573vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
574{
575 struct vm_page **bucket;
576
577 GIANT_REQUIRED;
578
579 if (m->object != NULL)
580 panic("vm_page_insert: already inserted");
581
582 /*
583 * Record the object/offset pair in this page
584 */
585 m->object = object;
586 m->pindex = pindex;
587
588 /*
589 * Insert it into the object/offset hash table
590 */
591 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
592 m->hnext = *bucket;
593 *bucket = m;
594 vm_page_bucket_generation++;
595
596 /*
597 * Now link into the object's list of backed pages.
598 */
599 TAILQ_INSERT_TAIL(&object->memq, m, listq);
600 object->generation++;
601
602 /*
603 * show that the object has one more resident page.
604 */
605 object->resident_page_count++;
606
607 /*
608 * Since we are inserting a new and possibly dirty page,
609 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
610 */
611 if (m->flags & PG_WRITEABLE)
612 vm_object_set_writeable_dirty(object);
613}
614
615/*
616 * vm_page_remove:
617 * NOTE: used by device pager as well -wfj
618 *
619 * Removes the given mem entry from the object/offset-page
620 * table and the object page list, but does not invalidate/terminate
621 * the backing store.
622 *
623 * The object and page must be locked, and at splhigh.
624 * The underlying pmap entry (if any) is NOT removed here.
625 * This routine may not block.
626 */
627void
628vm_page_remove(vm_page_t m)
629{
630 vm_object_t object;
631
632 GIANT_REQUIRED;
633
634 if (m->object == NULL)
635 return;
636
637 if ((m->flags & PG_BUSY) == 0) {
638 panic("vm_page_remove: page not busy");
639 }
640
641 /*
642 * Basically destroy the page.
643 */
644 vm_page_wakeup(m);
645
646 object = m->object;
647
648 /*
649 * Remove from the object/offset hash table. The page
650 * must be on the hash chain; we will panic if it isn't.
651 *
652 * Note: we must NULL-out m->hnext to prevent loops in detached
653 * buffers with vm_page_lookup().
654 */
655 {
656 struct vm_page **bucket;
657
658 bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
659 while (*bucket != m) {
660 if (*bucket == NULL)
661 panic("vm_page_remove(): page not found in hash");
662 bucket = &(*bucket)->hnext;
663 }
664 *bucket = m->hnext;
665 m->hnext = NULL;
666 vm_page_bucket_generation++;
667 }
668
669 /*
670 * Now remove from the object's list of backed pages.
671 */
672 TAILQ_REMOVE(&object->memq, m, listq);
673
674 /*
675 * And show that the object has one fewer resident page.
676 */
677 object->resident_page_count--;
678 object->generation++;
679
680 m->object = NULL;
681}
682
683/*
684 * vm_page_lookup:
685 *
686 * Returns the page associated with the object/offset
687 * pair specified; if none is found, NULL is returned.
688 *
689 * NOTE: the code below does not lock. It will operate properly if
690 * an interrupt makes a change, but the generation algorithm will not
691 * operate properly in an SMP environment where multiple CPUs are able to run
692 * kernel code simultaneously.
693 *
694 * The object must be locked. No side effects.
695 * This routine may not block.
696 * This is a critical path routine
697 */
698vm_page_t
699vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
700{
701 vm_page_t m;
702 struct vm_page **bucket;
703 int generation;
704
705 /*
706 * Search the hash table for this object/offset pair
707 */
708retry:
709 generation = vm_page_bucket_generation;
710 bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
711 for (m = *bucket; m != NULL; m = m->hnext) {
712 if ((m->object == object) && (m->pindex == pindex)) {
713 if (vm_page_bucket_generation != generation)
714 goto retry;
715 return (m);
716 }
717 }
718 if (vm_page_bucket_generation != generation)
719 goto retry;
720 return (NULL);
721}
722
723/*
724 * vm_page_rename:
725 *
726 * Move the given memory entry from its
727 * current object to the specified target object/offset.
728 *
729 * The object must be locked.
730 * This routine may not block.
731 *
732 * Note: this routine will raise itself to splvm(), the caller need not.
733 *
734 * Note: swap associated with the page must be invalidated by the move. We
735 * have to do this for several reasons: (1) we aren't freeing the
736 * page, (2) we are dirtying the page, (3) the VM system is probably
737 * moving the page from object A to B, and will then later move
738 * the backing store from A to B and we can't have a conflict.
739 *
740 * Note: we *always* dirty the page. It is necessary both for the
741 * fact that we moved it, and because we may be invalidating
742 * swap. If the page is on the cache, we have to deactivate it
743 * or vm_page_dirty() will panic. Dirty pages are not allowed
744 * on the cache.
745 */
746void
747vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
748{
749 int s;
750
751 s = splvm();
752 vm_page_remove(m);
753 vm_page_insert(m, new_object, new_pindex);
754 if (m->queue - m->pc == PQ_CACHE)
755 vm_page_deactivate(m);
756 vm_page_dirty(m);
757 splx(s);
758}
759
760/*
761 * vm_page_select_cache:
762 *
763 * Find a page on the cache queue with color optimization. Pages
764 * that are found but not usable are deactivated. This
765 * keeps us from using potentially busy cached pages.
766 *
767 * This routine must be called at splvm().
768 * This routine may not block.
769 */
770static vm_page_t
771vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
772{
773 vm_page_t m;
774
775 GIANT_REQUIRED;
776 while (TRUE) {
777 m = vm_pageq_find(
778 PQ_CACHE,
779 (pindex + object->pg_color) & PQ_L2_MASK,
780 FALSE
781 );
782 if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
783 m->hold_count || m->wire_count)) {
784 vm_page_deactivate(m);
785 continue;
786 }
787 return m;
788 }
789}
790
791/*
792 * vm_page_select_free:
793 *
794 * Find a free or zero page, with specified preference.
795 *
796 * This routine must be called at splvm().
797 * This routine may not block.
798 */
799static __inline vm_page_t
800vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
801{
802 vm_page_t m;
803
804 m = vm_pageq_find(
805 PQ_FREE,
806 (pindex + object->pg_color) & PQ_L2_MASK,
807 prefer_zero
808 );
809 return (m);
810}
811
812/*
813 * vm_page_alloc:
814 *
815 * Allocate and return a memory cell associated
816 * with this VM object/offset pair.
817 *
818 * page_req classes:
819 * VM_ALLOC_NORMAL normal process request
820 * VM_ALLOC_SYSTEM system *really* needs a page
821 * VM_ALLOC_INTERRUPT interrupt time request
822 * VM_ALLOC_ZERO zero page
823 *
824 * This routine may not block.
825 *
826 * Additional special handling is required when called from an
827 * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with
828 * the page cache in this case.
829 */
830vm_page_t
831vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
832{
833 vm_page_t m = NULL;
834 int s;
835
836 GIANT_REQUIRED;
837
838 KASSERT(!vm_page_lookup(object, pindex),
839 ("vm_page_alloc: page already allocated"));
840
841 /*
842 * The pager is allowed to eat deeper into the free page list.
843 */
844 if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
845 page_req = VM_ALLOC_SYSTEM;
846	}
847
848 s = splvm();
849
850loop:
851 if (cnt.v_free_count > cnt.v_free_reserved) {
852 /*
853 * Allocate from the free queue if there are plenty of pages
854 * in it.
855 */
856 if (page_req == VM_ALLOC_ZERO)
857 m = vm_page_select_free(object, pindex, TRUE);
858 else
859 m = vm_page_select_free(object, pindex, FALSE);
860 } else if (
861 (page_req == VM_ALLOC_SYSTEM &&
862 cnt.v_cache_count == 0 &&
863 cnt.v_free_count > cnt.v_interrupt_free_min) ||
864 (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
865 ) {
866 /*
867 * Interrupt or system, dig deeper into the free list.
868 */
869 m = vm_page_select_free(object, pindex, FALSE);
870 } else if (page_req != VM_ALLOC_INTERRUPT) {
871 /*
872 * Allocatable from cache (non-interrupt only). On success,
873 * we must free the page and try again, thus ensuring that
874 * cnt.v_*_free_min counters are replenished.
875 */
876 m = vm_page_select_cache(object, pindex);
877 if (m == NULL) {
878 splx(s);
879#if defined(DIAGNOSTIC)
880 if (cnt.v_cache_count > 0)
881 printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
882#endif
883 vm_pageout_deficit++;
884 pagedaemon_wakeup();
885 return (NULL);
886 }
887 KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
888 vm_page_busy(m);
889 vm_page_protect(m, VM_PROT_NONE);
890 vm_page_free(m);
891 goto loop;
892 } else {
893 /*
894 * Not allocatable from cache from interrupt, give up.
895 */
896 splx(s);
897 vm_pageout_deficit++;
898 pagedaemon_wakeup();
899 return (NULL);
900 }
901
902 /*
903 * At this point we had better have found a good page.
904 */
905
906 KASSERT(
907 m != NULL,
908 ("vm_page_alloc(): missing page on free queue\n")
909 );
910
911 /*
912 * Remove from free queue
913 */
914
915 vm_pageq_remove_nowakeup(m);
916
917 /*
918 * Initialize structure. Only the PG_ZERO flag is inherited.
919 */
920 if (m->flags & PG_ZERO) {
921 vm_page_zero_count--;
922 m->flags = PG_ZERO | PG_BUSY;
923 } else {
924 m->flags = PG_BUSY;
925 }
926 m->wire_count = 0;
927 m->hold_count = 0;
928 m->act_count = 0;
929 m->busy = 0;
930 m->valid = 0;
931 KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
932
933 /*
934 * vm_page_insert() is safe prior to the splx(). Note also that
935 * inserting a page here does not insert it into the pmap (which
936 * could cause us to block allocating memory). We cannot block
937 * anywhere.
938 */
939 vm_page_insert(m, object, pindex);
940
941 /*
942 * Don't wakeup too often - wakeup the pageout daemon when
943 * we would be nearly out of memory.
944 */
945 if (vm_paging_needed())
946 pagedaemon_wakeup();
947
948 splx(s);
949 return (m);
950}
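
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * non-interrupt caller that must have a page loops on VM_WAIT, e.g.
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		VM_WAIT;
 *
 * The returned page comes back with PG_BUSY set; the caller is expected
 * to vm_page_wakeup() it once initialization is complete.
 */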
951
952/*
953 * vm_wait: (also see VM_WAIT macro)
954 *
955 * Block until free pages are available for allocation
956 * - Called in various places before memory allocations.
957 */
958void
959vm_wait(void)
960{
961 int s;
962
963 s = splvm();
964 if (curproc == pageproc) {
965 vm_pageout_pages_needed = 1;
966 tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
967 } else {
968 if (!vm_pages_needed) {
969 vm_pages_needed = 1;
970 wakeup(&vm_pages_needed);
971 }
972 tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
973 }
974 splx(s);
975}
976
977/*
978 * vm_waitpfault: (also see VM_WAITPFAULT macro)
979 *
980 * Block until free pages are available for allocation
981 * - Called only in vm_fault so that processes page faulting
982 * can be easily tracked.
983 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
984 * processes will be able to grab memory first. Do not change
985 * this balance without careful testing first.
986 */
987void
988vm_waitpfault(void)
989{
990 int s;
991
992 s = splvm();
993 if (!vm_pages_needed) {
994 vm_pages_needed = 1;
995 wakeup(&vm_pages_needed);
996 }
997 tsleep(&cnt.v_free_count, PUSER, "pfault", 0);
998 splx(s);
999}
1000
1001/*
1002 * vm_page_activate:
1003 *
1004 * Put the specified page on the active list (if appropriate).
1005 * Ensure that act_count is at least ACT_INIT but do not otherwise
1006 * mess with it.
1007 *
1008 * The page queues must be locked.
1009 * This routine may not block.
1010 */
1011void
1012vm_page_activate(vm_page_t m)
1013{
1014 int s;
1015
1016 GIANT_REQUIRED;
1017 s = splvm();
1018 if (m->queue != PQ_ACTIVE) {
1019 if ((m->queue - m->pc) == PQ_CACHE)
1020 cnt.v_reactivated++;
1021 vm_pageq_remove(m);
1022 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1023 if (m->act_count < ACT_INIT)
1024 m->act_count = ACT_INIT;
1025 vm_pageq_enqueue(PQ_ACTIVE, m);
1026 }
1027 } else {
1028 if (m->act_count < ACT_INIT)
1029 m->act_count = ACT_INIT;
1030 }
1031 splx(s);
1032}
1033
1034/*
1035 * vm_page_free_wakeup:
1036 *
1037 * Helper routine for vm_page_free_toq() and vm_page_cache(). This
1038 * routine is called when a page has been added to the cache or free
1039 * queues.
1040 *
1041 * This routine may not block.
1042 * This routine must be called at splvm()
1043 */
1044static __inline void
1045vm_page_free_wakeup(void)
1046{
1047 /*
1048 * if pageout daemon needs pages, then tell it that there are
1049 * some free.
1050 */
1051 if (vm_pageout_pages_needed &&
1052 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1053 wakeup(&vm_pageout_pages_needed);
1054 vm_pageout_pages_needed = 0;
1055 }
1056 /*
1057 * wakeup processes that are waiting on memory if we hit a
1058 * high water mark, and wake up the scheduler process if we have
1059 * lots of memory; that process will swap in other processes.
1060 */
1061 if (vm_pages_needed && !vm_page_count_min()) {
1062 vm_pages_needed = 0;
1063 wakeup(&cnt.v_free_count);
1064 }
1065}
1066
1067/*
1068 * vm_page_free_toq:
1069 *
1070 * Returns the given page to the PQ_FREE list,
1071 * disassociating it from any VM object.
1072 *
1073 * Object and page must be locked prior to entry.
1074 * This routine may not block.
1075 */
1076
1077void
1078vm_page_free_toq(vm_page_t m)
1079{
1080 int s;
1081 struct vpgqueues *pq;
1082 vm_object_t object = m->object;
1083
1084 GIANT_REQUIRED;
1085 s = splvm();
1086 cnt.v_tfree++;
1087
1088 if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
1089 printf(
1090 "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
1091 (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
1092 m->hold_count);
1093 if ((m->queue - m->pc) == PQ_FREE)
1094 panic("vm_page_free: freeing free page");
1095 else
1096 panic("vm_page_free: freeing busy page");
1097 }
1098
1099 /*
1100 * unqueue, then remove page. Note that we cannot destroy
1101 * the page here because we do not want to call the pager's
1102 * callback routine until after we've put the page on the
1103 * appropriate free queue.
1104 */
1105 vm_pageq_remove_nowakeup(m);
1106 vm_page_remove(m);
1107
1108 /*
1109 * If fictitious, remove the object association and
1110 * return; otherwise delay object association removal.
1111 */
1112 if ((m->flags & PG_FICTITIOUS) != 0) {
1113 splx(s);
1114 return;
1115 }
1116
1117 m->valid = 0;
1118 vm_page_undirty(m);
1119
1120 if (m->wire_count != 0) {
1121 if (m->wire_count > 1) {
1122 panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
1123 m->wire_count, (long)m->pindex);
1124 }
1125 panic("vm_page_free: freeing wired page\n");
1126 }
1127
1128 /*
1129 * If we've exhausted the object's resident pages we want to free
1130 * it up.
1131 */
1132 if (object &&
1133 (object->type == OBJT_VNODE) &&
1134 ((object->flags & OBJ_DEAD) == 0)
1135 ) {
1136 struct vnode *vp = (struct vnode *)object->handle;
1137
1138 if (vp && VSHOULDFREE(vp))
1139 vfree(vp);
1140 }
1141
1142 /*
1143 * Clear the UNMANAGED flag when freeing an unmanaged page.
1144 */
1145 if (m->flags & PG_UNMANAGED) {
1146 m->flags &= ~PG_UNMANAGED;
1147 } else {
1148#ifdef __alpha__
1149 pmap_page_is_free(m);
1150#endif
1151 }
1152
1153 if (m->hold_count != 0) {
1154 m->flags &= ~PG_ZERO;
1155 m->queue = PQ_HOLD;
1156 } else
1157 m->queue = PQ_FREE + m->pc;
1158 pq = &vm_page_queues[m->queue];
1159 pq->lcnt++;
1160 ++(*pq->cnt);
1161
1162 /*
1163 * Put zero'd pages on the end ( where we look for zero'd pages
1164 * first ) and non-zeroed pages at the head.
1165 */
1166 if (m->flags & PG_ZERO) {
1167 TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
1168 ++vm_page_zero_count;
1169 } else {
1170 TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
1171 }
1172 vm_page_free_wakeup();
1173 splx(s);
1174}
1175
1176/*
1177 * vm_page_unmanage:
1178 *
1179 * Prevent PV management from being done on the page. The page is
1180 * removed from the paging queues as if it were wired, and as a
1181 * consequence of no longer being managed the pageout daemon will not
1182 * touch it (since there is no way to locate the pte mappings for the
1183 * page). madvise() calls that mess with the pmap will also no longer
1184 * operate on the page.
1185 *
1186 * Beyond that the page is still reasonably 'normal'. Freeing the page
1187 * will clear the flag.
1188 *
1189 * This routine is used by OBJT_PHYS objects - objects using unswappable
1190 * physical memory as backing store rather than swap-backed memory and
1191 * will eventually be extended to support 4MB unmanaged physical
1192 * mappings.
1193 */
1194void
1195vm_page_unmanage(vm_page_t m)
1196{
1197 int s;
1198
1199 s = splvm();
1200 if ((m->flags & PG_UNMANAGED) == 0) {
1201 if (m->wire_count == 0)
1202 vm_pageq_remove(m);
1203 }
1204 vm_page_flag_set(m, PG_UNMANAGED);
1205 splx(s);
1206}
1207
1208/*
1209 * vm_page_wire:
1210 *
1211 * Mark this page as wired down by yet
1212 * another map, removing it from paging queues
1213 * as necessary.
1214 *
1215 * The page queues must be locked.
1216 * This routine may not block.
1217 */
1218void
1219vm_page_wire(vm_page_t m)
1220{
1221 int s;
1222
1223 /*
1224 * Only bump the wire statistics if the page is not already wired,
1225 * and only unqueue the page if it is on some queue (if it is unmanaged
1226 * it is already off the queues).
1227 */
1228 s = splvm();
1229 if (m->wire_count == 0) {
1230 if ((m->flags & PG_UNMANAGED) == 0)
1231 vm_pageq_remove(m);
1232 cnt.v_wire_count++;
1233 }
1234 m->wire_count++;
1235 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
1236 splx(s);
1237 vm_page_flag_set(m, PG_MAPPED);
1238}
1239
1240/*
1241 * vm_page_unwire:
1242 *
1243 * Release one wiring of this page, potentially
1244 * enabling it to be paged again.
1245 *
1246 * Many pages placed on the inactive queue should actually go
1247 * into the cache, but it is difficult to figure out which. What
1248 * we do instead, if the inactive target is well met, is to put
1249 * clean pages at the head of the inactive queue instead of the tail.
1250 * This will cause them to be moved to the cache more quickly and
1251 * if not actively re-referenced, freed more quickly. If we just
1252 * stick these pages at the end of the inactive queue, heavy filesystem
1253 * meta-data accesses can cause an unnecessary paging load on memory bound
1254 * processes. This optimization causes one-time-use metadata to be
1255 * reused more quickly.
1256 *
1257 * BUT, if we are in a low-memory situation we have no choice but to
1258 * put clean pages on the cache queue.
1259 *
1260 * A number of routines use vm_page_unwire() to guarantee that the page
1261 * will go into either the inactive or active queues, and will NEVER
1262 * be placed in the cache - for example, just after dirtying a page.
1263 * dirty pages in the cache are not allowed.
1264 *
1265 * The page queues must be locked.
1266 * This routine may not block.
1267 */
1268void
1269vm_page_unwire(vm_page_t m, int activate)
1270{
1271 int s;
1272
1273 s = splvm();
1274
1275 if (m->wire_count > 0) {
1276 m->wire_count--;
1277 if (m->wire_count == 0) {
1278 cnt.v_wire_count--;
1279 if (m->flags & PG_UNMANAGED) {
1280 ;
1281 } else if (activate)
1282 vm_pageq_enqueue(PQ_ACTIVE, m);
1283 else {
1284 vm_page_flag_clear(m, PG_WINATCFLS);
1285 vm_pageq_enqueue(PQ_INACTIVE, m);
1286 }
1287 }
1288 } else {
1289 panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
1290 }
1291 splx(s);
1292}
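
/*
 * Usage sketch (illustrative, not part of the original file): wiring is
 * symmetric, so code that must keep a page resident across an operation
 * typically does
 *
 *	vm_page_wire(m);
 *	(operate on the page; it will not be paged out)
 *	vm_page_unwire(m, 1);
 *
 * passing activate == 0 instead when the page is unlikely to be needed
 * again soon, which queues it on the inactive queue as described above.
 */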
1293
1294
1295/*
1296 * Move the specified page to the inactive queue. If the page has
1297 * any associated swap, the swap is deallocated.
1298 *
1299 * Normally athead is 0 resulting in LRU operation. athead is set
1300 * to 1 if we want this page to be 'as if it were placed in the cache',
1301 * except without unmapping it from the process address space.
1302 *
1303 * This routine may not block.
1304 */
1305static __inline void
1306_vm_page_deactivate(vm_page_t m, int athead)
1307{
1308 int s;
1309
1310 GIANT_REQUIRED;
1311 /*
1312 * Ignore if already inactive.
1313 */
1314 if (m->queue == PQ_INACTIVE)
1315 return;
1316
1317 s = splvm();
1318 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1319 if ((m->queue - m->pc) == PQ_CACHE)
1320 cnt.v_reactivated++;
1321 vm_page_flag_clear(m, PG_WINATCFLS);
1322 vm_pageq_remove(m);
1323 if (athead)
1324 TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1325 else
1326 TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1327 m->queue = PQ_INACTIVE;
1328 vm_page_queues[PQ_INACTIVE].lcnt++;
1329 cnt.v_inactive_count++;
1330 }
1331 splx(s);
1332}
1333
1334void
1335vm_page_deactivate(vm_page_t m)
1336{
1337 _vm_page_deactivate(m, 0);
1338}
1339
1340/*
1341 * vm_page_try_to_cache:
1342 *
1343 * Returns 0 on failure, 1 on success
1344 */
1345int
1346vm_page_try_to_cache(vm_page_t m)
1347{
1348 GIANT_REQUIRED;
1349
1350 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1351 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1352 return (0);
1353 }
1354 vm_page_test_dirty(m);
1355 if (m->dirty)
1356 return (0);
1357 vm_page_cache(m);
1358 return (1);
1359}
1360
1361/*
1362 * vm_page_try_to_free()
1363 *
1364 * Attempt to free the page. If we cannot free it, we do nothing.
1365 * 1 is returned on success, 0 on failure.
1366 */
1367int
1368vm_page_try_to_free(vm_page_t m)
1369{
1370 if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1371 (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1372 return (0);
1373 }
1374 vm_page_test_dirty(m);
1375 if (m->dirty)
1376 return (0);
1377 vm_page_busy(m);
1378 vm_page_protect(m, VM_PROT_NONE);
1379 vm_page_free(m);
1380 return (1);
1381}
1382
1383/*
1384 * vm_page_cache
1385 *
1386 * Put the specified page onto the page cache queue (if appropriate).
1387 *
1388 * This routine may not block.
1389 */
1390void
1391vm_page_cache(vm_page_t m)
1392{
1393 int s;
1394
1395 GIANT_REQUIRED;
1396 if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
1397 printf("vm_page_cache: attempting to cache busy page\n");
1398 return;
1399 }
1400 if ((m->queue - m->pc) == PQ_CACHE)
1401 return;
1402
1403 /*
1404 * Remove all pmaps and indicate that the page is not
1405 * writeable or mapped.
1406 */
1407 vm_page_protect(m, VM_PROT_NONE);
1408 if (m->dirty != 0) {
1409 panic("vm_page_cache: caching a dirty page, pindex: %ld",
1410 (long)m->pindex);
1411 }
1412 s = splvm();
1413 vm_pageq_remove_nowakeup(m);
1414 vm_pageq_enqueue(PQ_CACHE + m->pc, m);
1415 vm_page_free_wakeup();
1416 splx(s);
1417}
1418
1419/*
1420 * vm_page_dontneed
1421 *
1422 * Cache, deactivate, or do nothing as appropriate. This routine
1423 * is typically used by madvise() MADV_DONTNEED.
1424 *
1425 * Generally speaking we want to move the page into the cache so
1426 * it gets reused quickly. However, this can result in a silly syndrome
1427 * due to the page recycling too quickly. Small objects will not be
1428 * fully cached. On the other hand, if we move the page to the inactive
1429 * queue we wind up with a problem whereby very large objects
1430 * unnecessarily blow away our inactive and cache queues.
1431 *
1432 * The solution is to move the pages based on a fixed weighting. We
1433 * either leave them alone, deactivate them, or move them to the cache,
1434 * where moving them to the cache has the highest weighting.
1435 * By forcing some pages into other queues we eventually force the
1436 * system to balance the queues, potentially recovering other unrelated
1437 * space from active. The idea is to not force this to happen too
1438 * often.
1439 */
1440void
1441vm_page_dontneed(vm_page_t m)
1442{
1443 static int dnweight;
1444 int dnw;
1445 int head;
1446
1447 GIANT_REQUIRED;
1448 dnw = ++dnweight;
1449
1450 /*
1451 * occasionally leave the page alone
1452 */
1453 if ((dnw & 0x01F0) == 0 ||
1454 m->queue == PQ_INACTIVE ||
1455 m->queue - m->pc == PQ_CACHE
1456 ) {
1457 if (m->act_count >= ACT_INIT)
1458 --m->act_count;
1459 return;
1460 }
1461
1462 if (m->dirty == 0)
1463 vm_page_test_dirty(m);
1464
1465 if (m->dirty || (dnw & 0x0070) == 0) {
1466 /*
1467 * Deactivate the page 3 times out of 32.
1468 */
1469 head = 0;
1470 } else {
1471 /*
1472 * Cache the page 28 times out of every 32. Note that
1473 * the page is deactivated instead of cached, but placed
1474 * at the head of the queue instead of the tail.
1475 */
1476 head = 1;
1477 }
1478 _vm_page_deactivate(m, head);
1479}
1480
1481/*
1482 * Grab a page, waiting until we are woken up due to the page
1483 * changing state. We keep on waiting as long as the page continues
1484 * to exist in the object. If the page doesn't exist, allocate it.
1485 *
1486 * This routine may block.
1487 */
1488vm_page_t
1489vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1490{
1491 vm_page_t m;
1492 int s, generation;
1493
1494 GIANT_REQUIRED;
1495retrylookup:
1496 if ((m = vm_page_lookup(object, pindex)) != NULL) {
1497 if (m->busy || (m->flags & PG_BUSY)) {
1498 generation = object->generation;
1499
1500 s = splvm();
1501 while ((object->generation == generation) &&
1502 (m->busy || (m->flags & PG_BUSY))) {
1503 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1504 tsleep(m, PVM, "pgrbwt", 0);
1505 if ((allocflags & VM_ALLOC_RETRY) == 0) {
1506 splx(s);
1507 return NULL;
1508 }
1509 }
1510 splx(s);
1511 goto retrylookup;
1512 } else {
1513 vm_page_busy(m);
1514 return m;
1515 }
1516 }
1517
1518 m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1519 if (m == NULL) {
1520 VM_WAIT;
1521 if ((allocflags & VM_ALLOC_RETRY) == 0)
1522 return NULL;
1523 goto retrylookup;
1524 }
1525
1526 return m;
1527}
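
/*
 * Usage sketch (illustrative, not part of the original file): with
 * VM_ALLOC_RETRY the caller always gets a busied page back, e.g.
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *
 * Without VM_ALLOC_RETRY the call may return NULL and the caller must
 * be prepared to retry or fail.
 */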
1528
1529/*
1530 * Mapping function for valid bits or for dirty bits in
1531 * a page. May not block.
1532 *
1533 * Inputs are required to range within a page.
1534 */
1535__inline int
1536vm_page_bits(int base, int size)
1537{
1538 int first_bit;
1539 int last_bit;
1540
1541 KASSERT(
1542 base + size <= PAGE_SIZE,
1543 ("vm_page_bits: illegal base/size %d/%d", base, size)
1544 );
1545
1546 if (size == 0) /* handle degenerate case */
1547 return (0);
1548
1549 first_bit = base >> DEV_BSHIFT;
1550 last_bit = (base + size - 1) >> DEV_BSHIFT;
1551
1552 return ((2 << last_bit) - (1 << first_bit));
1553}
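
/*
 * Worked example (illustrative, not part of the original file): assuming
 * the usual DEV_BSIZE of 512 (DEV_BSHIFT == 9), vm_page_bits(512, 1024)
 * gives first_bit == 1 and last_bit == (512 + 1024 - 1) >> 9 == 2, so
 * the result is (2 << 2) - (1 << 1) == 6, i.e. bits 1 and 2 set -- one
 * bit for each DEV_BSIZE chunk covered by the range.
 */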
1554
1555/*
1556 * vm_page_set_validclean:
1557 *
1558 * Sets portions of a page valid and clean. The arguments are expected
1559 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
1560 * of any partial chunks touched by the range. The invalid portion of
1561 * such chunks will be zero'd.
1562 *
1563 * This routine may not block.
1564 *
1565 * (base + size) must be less than or equal to PAGE_SIZE.
1566 */
1567void
1568vm_page_set_validclean(vm_page_t m, int base, int size)
1569{
1570 int pagebits;
1571 int frag;
1572 int endoff;
1573
1574 GIANT_REQUIRED;
1575 if (size == 0) /* handle degenerate case */
1576 return;
1577
1578 /*
1579 * If the base is not DEV_BSIZE aligned and the valid
1580 * bit is clear, we have to zero out a portion of the
1581 * first block.
1582 */
1583 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1584 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
1585 ) {
1586 pmap_zero_page_area(
1587 VM_PAGE_TO_PHYS(m),
1588 frag,
1589 base - frag
1590 );
1591 }
1592
1593 /*
1594 * If the ending offset is not DEV_BSIZE aligned and the
1595 * valid bit is clear, we have to zero out a portion of
1596 * the last block.
1597 */
1598 endoff = base + size;
1599 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1600 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
1601 ) {
1602 pmap_zero_page_area(
1603 VM_PAGE_TO_PHYS(m),
1604 endoff,
1605 DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
1606 );
1607 }
1608
1609 /*
1610 * Set valid, clear dirty bits. If validating the entire
1611 * page we can safely clear the pmap modify bit. We also
1612 * use this opportunity to clear the PG_NOSYNC flag. If a process
1613 * takes a write fault on a MAP_NOSYNC memory area the flag will
1614 * be set again.
1615 *
1616 * We set valid bits inclusive of any overlap, but we can only
1617 * clear dirty bits for DEV_BSIZE chunks that are fully within
1618 * the range.
1619 */
1620 pagebits = vm_page_bits(base, size);
1621 m->valid |= pagebits;
1622#if 0 /* NOT YET */
1623 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
1624 frag = DEV_BSIZE - frag;
1625 base += frag;
1626 size -= frag;
1627 if (size < 0)
1628 size = 0;
1629 }
1630 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
1631#endif
1632 m->dirty &= ~pagebits;
1633 if (base == 0 && size == PAGE_SIZE) {
1634 pmap_clear_modify(m);
1635 vm_page_flag_clear(m, PG_NOSYNC);
1636 }
1637}
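
/*
 * Usage sketch (illustrative, not part of the original file): a pager
 * that has filled an entire page can call
 *
 *	vm_page_set_validclean(m, 0, PAGE_SIZE);
 *
 * which sets all of m->valid, clears all of m->dirty, and (per the
 * base == 0 && size == PAGE_SIZE case above) also clears the pmap
 * modify bit and PG_NOSYNC.
 */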
1638
1639#if 0
1640
1641void
1642vm_page_set_dirty(vm_page_t m, int base, int size)
1643{
1644 m->dirty |= vm_page_bits(base, size);
1645}
1646
1647#endif
1648
1649void
1650vm_page_clear_dirty(vm_page_t m, int base, int size)
1651{
1652 GIANT_REQUIRED;
1653 m->dirty &= ~vm_page_bits(base, size);
1654}
1655
1656/*
1657 * vm_page_set_invalid:
1658 *
1659 * Invalidates DEV_BSIZE'd chunks within a page. Both the
1660 * valid and dirty bits for the affected areas are cleared.
1661 *
1662 * May not block.
1663 */
1664void
1665vm_page_set_invalid(vm_page_t m, int base, int size)
1666{
1667 int bits;
1668
1669 GIANT_REQUIRED;
1670 bits = vm_page_bits(base, size);
1671 m->valid &= ~bits;
1672 m->dirty &= ~bits;
1673 m->object->generation++;
1674}
1675
1676/*
1677 * vm_page_zero_invalid()
1678 *
1679 * The kernel assumes that the invalid portions of a page contain
1680 * garbage, but such pages can be mapped into memory by user code.
1681 * When this occurs, we must zero out the non-valid portions of the
1682 * page so user code sees what it expects.
1683 *
1684 * Pages are most often semi-valid when the end of a file is mapped
1685 * into memory and the file's size is not page aligned.
1686 */
1687void
1688vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1689{
1690 int b;
1691 int i;
1692
1693 /*
1694 * Scan the valid bits looking for invalid sections that
1695 * must be zeroed. Invalid sub-DEV_BSIZE'd areas ( where the
1696 * valid bit may be set ) have already been zeroed by
1697 * vm_page_set_validclean().
1698 */
1699 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1700 if (i == (PAGE_SIZE / DEV_BSIZE) ||
1701 (m->valid & (1 << i))
1702 ) {
1703 if (i > b) {
1704 pmap_zero_page_area(
1705 VM_PAGE_TO_PHYS(m),
1706 b << DEV_BSHIFT,
1707 (i - b) << DEV_BSHIFT
1708 );
1709 }
1710 b = i + 1;
1711 }
1712 }
1713
1714 /*
1715 * setvalid is TRUE when we can safely set the zero'd areas
1716 * as being valid. We can do this if there are no cache consistency
1717 * issues. e.g. it is ok to do with UFS, but not ok to do with NFS.
1718 */
1719 if (setvalid)
1720 m->valid = VM_PAGE_BITS_ALL;
1721}
1722
1723/*
1724 * vm_page_is_valid:
1725 *
1726 * Is (partial) page valid? Note that the case where size == 0
1727 * will return FALSE in the degenerate case where the page is
1728 * entirely invalid, and TRUE otherwise.
1729 *
1730 * May not block.
1731 */
1732int
1733vm_page_is_valid(vm_page_t m, int base, int size)
1734{
1735 int bits = vm_page_bits(base, size);
1736
1737 if (m->valid && ((m->valid & bits) == bits))
1738 return 1;
1739 else
1740 return 0;
1741}
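
/*
 * Usage sketch (illustrative, not part of the original file):
 * vm_page_is_valid(m, 0, PAGE_SIZE) is non-zero only when every
 * DEV_BSIZE chunk of the page is valid; a caller interested in a single
 * block can instead pass that block's offset and DEV_BSIZE as the range.
 */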
1742
1743/*
1744 * update dirty bits from pmap/mmu. May not block.
1745 */
1746void
1747vm_page_test_dirty(vm_page_t m)
1748{
1749 if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
1750 vm_page_dirty(m);
1751 }
1752}
1753
1754#include "opt_ddb.h"
1755#ifdef DDB
1756#include <sys/kernel.h>
1757
1758#include <ddb/ddb.h>
1759
1760DB_SHOW_COMMAND(page, vm_page_print_page_info)
1761{
1762 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1763 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1764 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1765 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1766 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1767 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1768 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1769 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1770 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1771 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1772}
1773
1774DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1775{
1776 int i;
1777 db_printf("PQ_FREE:");
1778 for (i = 0; i < PQ_L2_SIZE; i++) {
1779 db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
1780 }
1781 db_printf("\n");
1782
1783 db_printf("PQ_CACHE:");
1784 for (i = 0; i < PQ_L2_SIZE; i++) {
1785 db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
1786 }
1787 db_printf("\n");
1788
1789 db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1790 vm_page_queues[PQ_ACTIVE].lcnt,
1791 vm_page_queues[PQ_INACTIVE].lcnt);
1792}
1793#endif /* DDB */