/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/vm_fault.c 318716 2017-05-23 07:27:30Z markj $");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t	first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int map_generation;
	bool lookup_still_valid;
	struct vnode *vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward);

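/*
 * Unbusy fs->m and move it to the inactive queue, making it a candidate
 * for reclamation by the page daemon, then forget about it.
 */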
static inline void
release_page(struct faultstate *fs)
{

	vm_page_xunbusy(fs->m);
	vm_page_lock(fs->m);
	vm_page_deactivate(fs->m);
	vm_page_unlock(fs->m);
	fs->m = NULL;
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

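/*
 * Release everything recorded in the faultstate: the paging-in-progress
 * references, the transient first_m page, the reference on first_object,
 * the map lookup, and the vnode.
 */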
static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_WUNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_lock(fs->first_m);
		vm_page_free(fs->first_m);
		vm_page_unlock(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

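/*
 * Dirty the page if appropriate for the fault type and flags, and manage
 * the page's VPO_NOSYNC flag according to the map entry's MAP_ENTRY_NOSYNC
 * setting.  With set_wd true, also flag the object as writeable/dirty.
 */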
static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags, bool set_wd)
{
	bool need_dirty;

	if (((prot & VM_PROT_WRITE) == 0 &&
	    (fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_OBJECT_ASSERT_LOCKED(m->object);

	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fault_flags & VM_FAULT_DIRTY) != 0;

	if (set_wd)
		vm_object_set_writeable_dirty(m->object);
	else
		/*
		 * If two callers of vm_fault_dirty() with set_wd ==
		 * FALSE race, one for a map entry with the
		 * MAP_ENTRY_NOSYNC flag set and the other with the
		 * flag clear, it is possible for the no-NOSYNC
		 * thread to see m->dirty != 0 and fail to clear
		 * VPO_NOSYNC.  Take the vm_page lock around the
		 * manipulation of VPO_NOSYNC and the vm_page_dirty()
		 * call to avoid the race and keep m->oflags
		 * consistent.
		 */
		vm_page_lock(m);

	/*
	 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
	 * if the page is already dirty to prevent data written with
	 * the expectation of being synced from not being synced.
	 * Likewise if this entry does not request NOSYNC then make
	 * sure the page isn't marked NOSYNC.  Applications sharing
	 * data should use the same flags to avoid ping ponging.
	 */
	if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
		if (m->dirty == 0) {
			m->oflags |= VPO_NOSYNC;
		}
	} else {
		m->oflags &= ~VPO_NOSYNC;
	}

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also tell the backing pager, if any, that it should remove
	 * any swap backing since the page is now dirty.
	 */
	if (need_dirty)
		vm_page_dirty(m);
	if (!set_wd)
		vm_page_unlock(m);
	if (need_dirty)
		vm_pager_page_unswapped(m);
}

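/*
 * If the caller asked for the faulted page back, record it in *m_hold and
 * hold it so that it is not freed after the locks are dropped.
 */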
static void
vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m)
{

	if (m_hold != NULL) {
		*m_hold = m;
		vm_page_lock(m);
		vm_page_hold(m);
		vm_page_unlock(m);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m;
	int rv;

	MPASS(fs->vp == NULL);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
		return (KERN_FAILURE);
	rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (rv != KERN_SUCCESS)
		return (rv);
	vm_fault_fill_hold(m_hold, m);
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
	VM_OBJECT_RUNLOCK(fs->first_object);
	if (!wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;
	return (KERN_SUCCESS);
}

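/*
 * Reacquire the map's read lock.  If the trylock fails, drop the object
 * lock first so that we never sleep on the map lock while holding the
 * object lock.
 */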
static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(fs->first_object->paging_in_progress > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(m->valid == VM_PAGE_BITS_ALL);
	MPASS(vm_page_xbusied(m));
}

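/*
 * Deactivate and unbusy the range of pages [first, last] that the pager
 * populated but that will not be mapped, e.g., because the map changed or
 * the pager's run extends beyond the map entry.
 */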
static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_lock(m);
		vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
}

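/*
 * Fault in the requested page by asking the pager to populate a run of
 * pages around it in a single call, then map and activate (or wire) every
 * page in the resulting run.  Used for OBJ_POPULATE objects in place of
 * the ordinary page-at-a-time path.
 */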
static int
vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(fs->first_object->paging_in_progress > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fault_type, fs->entry->max_protection, &pager_first, &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	if (fs->map->timestamp != fs->map_generation) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    pager_last);
		return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
		    true);
		VM_OBJECT_WUNLOCK(fs->first_object);
		pmap_enter(fs->map->pmap, fs->entry->start + IDX_TO_OFF(pidx) -
		    fs->entry->offset, m, prot, fault_type | (wired ?
		    PMAP_ENTER_WIRED : 0), 0);
		VM_OBJECT_WLOCK(fs->first_object);
		if (pidx == fs->first_pindex)
			vm_fault_fill_hold(m_hold, m);
		vm_page_lock(m);
		if ((fault_flags & VM_FAULT_WIRE) != 0) {
			KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
			vm_page_wire(m);
		} else {
			vm_page_activate(m);
		}
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags)
{
	struct thread *td;
	int result;

	td = curthread;
	if ((td->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(td, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	return (result);
}

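/*
 * The workhorse behind vm_fault().  In addition to handling the fault,
 * optionally returns the page in *m_hold with an extra hold reference so
 * that the caller can safely use it after the fault completes.
 */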
int
vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	struct vnode *vp;
	vm_object_t next_object, retry_object;
	vm_offset_t e_end, e_start;
	vm_pindex_t retry_pindex;
	vm_prot_t prot, retry_prot;
	int ahead, alloc_req, behind, cluster_offset, error, era, faultcount;
	int locked, nera, result, rv;
	u_char behavior;
	boolean_t wired;	/* Passed by reference. */
	bool dead, growstack, hardfault, is_first_object_locked;

	PCPU_INC(cnt.v_vm_faults);
	fs.vp = NULL;
	faultcount = 0;
	nera = -1;
	growstack = true;
	hardfault = false;

RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
	    &fs.first_object, &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		if (growstack && result == KERN_INVALID_ADDRESS &&
		    map != kernel_map) {
			result = vm_map_growstack(curproc, vaddr);
			if (result != KERN_SUCCESS)
				return (KERN_FAILURE);
			growstack = false;
			goto RetryFault;
		}
		unlock_vp(&fs);
		return (result);
	}

	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs.entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs.map);
		vm_map_lock(fs.map);
		if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
		    (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(&fs);
			fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs.map, 0);
		} else
			vm_map_unlock(fs.map);
		goto RetryFault;
	}

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);
	else
		KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
		    ("!wired && VM_FAULT_WIRE"));

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are both (1) mapping an existing page from the top-
	 * level object and (2) not having to mark that object as containing
	 * dirty pages.  Under these conditions, a read lock on the top-level
	 * object suffices, allowing multiple page faults of a similar type to
	 * run in parallel on the same top-level object.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
	    /* avoid calling vm_object_set_writeable_dirty() */
	    ((prot & VM_PROT_WRITE) == 0 ||
	    (fs.first_object->type != OBJT_VNODE &&
	    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
		VM_OBJECT_RLOCK(fs.first_object);
		if ((prot & VM_PROT_WRITE) == 0 ||
		    (fs.first_object->type != OBJT_VNODE &&
		    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
			rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
			    fault_flags, wired, m_hold);
			if (rv == KERN_SUCCESS)
				return (rv);
		}
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = true;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is either exclusive or shared busy
			 * because the vm_pager may be using read busy for
			 * pageouts (and even pageins if it is the vnode
			 * pager), and we could end up trying to pagein and
			 * pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a shared busied page except, perhaps,
			 * to pmap it.
			 */
			if (vm_page_busied(fs.m)) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(fs.m, PGA_REFERENCED);
				if (fs.object != fs.first_object) {
					if (!VM_OBJECT_TRYWLOCK(
					    fs.first_object)) {
						VM_OBJECT_WUNLOCK(fs.object);
						VM_OBJECT_WLOCK(fs.first_object);
						VM_OBJECT_WLOCK(fs.object);
					}
					vm_page_lock(fs.first_m);
					vm_page_free(fs.first_m);
					vm_page_unlock(fs.first_m);
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_WUNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);
				PCPU_INC(cnt.v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
			vm_page_lock(fs.m);
			vm_page_remque(fs.m);
			vm_page_unlock(fs.m);

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break out (we
			 * found the page).
			 */
			vm_page_xbusy(fs.m);
			if (fs.m->valid != VM_PAGE_BITS_ALL)
				goto readrest;
			break;
		}
		KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			if (fs.object == fs.first_object &&
			    (fs.first_object->flags & OBJ_POPULATE) != 0 &&
			    fs.first_object->shadow_count == 0) {
				rv = vm_fault_populate(&fs, vaddr, prot,
				    fault_type, fault_flags, wired, m_hold);
				switch (rv) {
				case KERN_SUCCESS:
				case KERN_FAILURE:
					unlock_and_deallocate(&fs);
					return (rv);
				case KERN_RESOURCE_SHORTAGE:
					unlock_and_deallocate(&fs);
					goto RetryFault;
				case KERN_NOT_RECEIVER:
					/*
					 * Pager's populate() method
					 * returned VM_PAGER_BAD.
					 */
					break;
				default:
					panic("inconsistent return codes");
				}
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * Unlocked read of the p_flag is harmless. At
			 * worst, the P_KILLED might not be observed
			 * there, and allocation can fail, causing
			 * restart and new reading of the p_flag.
			 */
			if (!vm_page_count_severe() || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
				vm_object_color(fs.object, atop(vaddr) -
				    fs.pindex);
#endif
				alloc_req = P_KILLED(curproc) ?
				    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
				if (fs.object->type != OBJT_VNODE &&
				    fs.object->backing_object == NULL)
					alloc_req |= VM_ALLOC_ZERO;
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    alloc_req);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAITPFAULT;
				goto RetryFault;
			}
		}

readrest:
		/*
		 * At this point, we have either allocated a new page or found
		 * an existing page that is only partially valid.
		 *
		 * We hold a reference on the current object and the page is
		 * exclusive busied.
		 */

		/*
		 * If the pager for the current object might have the page,
		 * then determine the number of additional pages to read and
		 * potentially reprioritize previously read pages for earlier
		 * reclamation.  These operations should only be performed
		 * once per page fault.  Even if the current pager doesn't
		 * have the page, the number of additional pages to read will
		 * apply to subsequent objects in the shadow chain.
		 */
		if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
		    !P_KILLED(curproc)) {
			KASSERT(fs.lookup_still_valid, ("map unlocked"));
			era = fs.entry->read_ahead;
			behavior = vm_map_entry_behavior(fs.entry);
			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				nera = 0;
			} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
				nera = VM_FAULT_READ_AHEAD_MAX;
				if (vaddr == fs.entry->next_read)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else if (vaddr == fs.entry->next_read) {
				/*
				 * This is a sequential fault.  Arithmetically
				 * increase the requested number of pages in
				 * the read-ahead window.  The requested
				 * number of pages is "# of sequential faults
				 * x (read ahead min + 1) + read ahead min"
				 */
				nera = VM_FAULT_READ_AHEAD_MIN;
				if (era > 0) {
					nera += era + 1;
					if (nera > VM_FAULT_READ_AHEAD_MAX)
						nera = VM_FAULT_READ_AHEAD_MAX;
				}
				if (era == VM_FAULT_READ_AHEAD_MAX)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else {
				/*
				 * This is a non-sequential fault.
				 */
				nera = 0;
			}
			if (era != nera) {
				/*
				 * A read lock on the map suffices to update
				 * the read ahead count safely.
				 */
				fs.entry->read_ahead = nera;
			}

			/*
			 * Prepare for unlocking the map.  Save the map
			 * entry's start and end addresses, which are used to
			 * optimize the size of the pager operation below.
			 * Even if the map entry's addresses change after
			 * unlocking the map, using the saved addresses is
			 * safe.
			 */
			e_start = fs.entry->start;
			e_end = fs.entry->end;
		}

		/*
		 * Call the pager to retrieve the page if there is a chance
		 * that the pager has it, and potentially retrieve additional
		 * pages at the same time.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * Release the map lock before locking the vnode or
			 * sleeping in the pager.  (If the current object has
			 * a shadow, then an earlier iteration of this loop
			 * may have already unlocked the map.)
			 */
			unlock_map(&fs);

			if (fs.object->type == OBJT_VNODE &&
			    (vp = fs.object->handle) != fs.vp) {
				/*
				 * Perform an unlock in case the desired vnode
				 * changed while the map was unlocked during a
				 * retry.
				 */
				unlock_vp(&fs);

				locked = VOP_ISLOCKED(vp);
				if (locked != LK_EXCLUSIVE)
					locked = LK_SHARED;

				/*
				 * We must not sleep acquiring the vnode lock
				 * while we have the page exclusive busied or
				 * the object's paging-in-progress count
				 * incremented.  Otherwise, we could deadlock.
				 */
				error = vget(vp, locked | LK_CANRECURSE |
				    LK_NOWAIT, curthread);
				if (error != 0) {
					vhold(vp);
					release_page(&fs);
					unlock_and_deallocate(&fs);
					error = vget(vp, locked | LK_RETRY |
					    LK_CANRECURSE, curthread);
					vdrop(vp);
					fs.vp = vp;
					KASSERT(error == 0,
					    ("vm_fault: vget failed"));
					goto RetryFault;
				}
				fs.vp = vp;
			}
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));

			/*
			 * Page in the requested page and hint to the pager
			 * that it may also bring in surrounding pages.
			 */
			if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
			    P_KILLED(curproc)) {
				behind = 0;
				ahead = 0;
			} else {
				/* Is this a sequential fault? */
				if (nera > 0) {
					behind = 0;
					ahead = nera;
				} else {
					/*
					 * Request a cluster of pages that is
					 * aligned to a VM_FAULT_READ_DEFAULT
					 * page offset boundary within the
					 * object.  Alignment to a page offset
					 * boundary is more likely to coincide
					 * with the underlying file system
					 * block than alignment to a virtual
					 * address boundary.
					 */
					cluster_offset = fs.pindex %
					    VM_FAULT_READ_DEFAULT;
					behind = ulmin(cluster_offset,
					    atop(vaddr - e_start));
					ahead = VM_FAULT_READ_DEFAULT - 1 -
					    cluster_offset;
				}
				ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
			}
			rv = vm_pager_get_pages(fs.object, &fs.m, 1,
			    &behind, &ahead);
			if (rv == VM_PAGER_OK) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);

			/*
			 * If an I/O error occurred or the requested page was
			 * outside the range of the pager, clean up and return
			 * an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				vm_page_lock(fs.m);
				if (fs.m->wire_count == 0)
					vm_page_free(fs.m);
				else
					vm_page_xunbusy_maybelocked(fs.m);
				vm_page_unlock(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return (rv == VM_PAGER_ERROR ? KERN_FAILURE :
				    KERN_PROTECTION_FAILURE);
			}

			/*
			 * The requested page does not exist at this object/
			 * offset.  Remove the invalid page from the object,
			 * waking up anyone waiting for it, and continue on to
			 * the next object.  However, if this is the top-level
			 * object, we must leave the busy page in place to
			 * prevent another process from rushing past us, and
			 * inserting the page in that object at the same time
			 * that we are.
			 */
			if (fs.object != fs.first_object) {
				vm_page_lock(fs.m);
				if (fs.m->wire_count == 0)
					vm_page_free(fs.m);
				else
					vm_page_xunbusy_maybelocked(fs.m);
				vm_page_unlock(fs.m);
				fs.m = NULL;
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_WLOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				PCPU_INC(cnt.v_ozfod);
			}
			PCPU_INC(cnt.v_zfod);
			fs.m->valid = VM_PAGE_BITS_ALL;
			/* Don't try to prefault neighboring pages. */
			faultcount = 1;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_WLOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			fs.pindex +=
			    OFF_TO_IDX(fs.object->backing_object_offset);
			VM_OBJECT_WUNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	vm_page_assert_xbusied(fs.m);

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = false;
			if (
				/*
				 * Only one shadow object
				 */
				(fs.object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs.object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs.object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs.object->type == OBJT_DEFAULT) ||
				 (fs.object->type == OBJT_SWAP)) &&
			    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
				/*
				 * We don't chase down the shadow chain
				 */
			    fs.object == fs.first_object->backing_object) {
				vm_page_lock(fs.m);
				vm_page_remove(fs.m);
				vm_page_unlock(fs.m);
				vm_page_lock(fs.first_m);
				vm_page_replace_checked(fs.m, fs.first_object,
				    fs.first_pindex, fs.first_m);
				vm_page_free(fs.first_m);
				vm_page_unlock(fs.first_m);
				vm_page_dirty(fs.m);
#if VM_NRESERVLEVEL > 0
				/*
				 * Rename the reservation.
				 */
				vm_reserv_rename(fs.m, fs.first_object,
				    fs.object, OFF_TO_IDX(
				    fs.first_object->backing_object_offset));
#endif
				/*
				 * Removing the page from the backing object
				 * unbusied it.
				 */
				vm_page_xbusy(fs.m);
				fs.first_m = fs.m;
				fs.m = NULL;
				PCPU_INC(cnt.v_cow_optim);
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
					vm_page_lock(fs.first_m);
					vm_page_wire(fs.first_m);
					vm_page_unlock(fs.first_m);

					vm_page_lock(fs.m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
					vm_page_unlock(fs.m);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_WUNLOCK(fs.object);
			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_WLOCK(fs.object);
			PCPU_INC(cnt.v_cow_faults);
			curthread->td_cow++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = true;
		if (fs.map->timestamp != fs.map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
			    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the inactive
			 * list (the easiest thing to do here).  If no one needs it,
			 * pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked then
				 * retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the object has
			 * been copied while we left the map unlocked. Changing from
			 * read to write permission is OK - we leave the page
			 * write-protected, and catch the write fault. Changing from
			 * write to read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
		}
	}

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true);
	vm_page_assert_xbusied(fs.m);

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_WUNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
	    wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR);
	VM_OBJECT_WLOCK(fs.object);
	vm_page_lock(fs.m);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fault_flags & VM_FAULT_WIRE) != 0) {
		KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
		vm_page_wire(fs.m);
	} else
		vm_page_activate(fs.m);
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_hold(fs.m);
	}
	vm_page_unlock(fs.m);
	vm_page_xunbusy(fs.m);

	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault) {
		PCPU_INC(cnt.v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	first_object = fs->first_object;
	if (first_object != object) {
		if (!VM_OBJECT_TRYWLOCK(first_object)) {
			VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WLOCK(first_object);
			VM_OBJECT_WLOCK(object);
		}
	}
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (m->valid != VM_PAGE_BITS_ALL ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.
				 */
				vm_page_lock(m);
				vm_page_deactivate(m);
				vm_page_unlock(m);
			}
		}
	}
	if (first_object != object)
		VM_OBJECT_WUNLOCK(first_object);
}

/*
 * vm_fault_prefault provides a quick way of clustering
1351 * of vm_map_pmap_enter, except it runs at page fault time instead
1352 * of mmap time.
1353 */
1354static void
1355vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
1356    int backward, int forward)
1357{
1358	pmap_t pmap;
1359	vm_map_entry_t entry;
1360	vm_object_t backing_object, lobject;
1361	vm_offset_t addr, starta;
1362	vm_pindex_t pindex;
1363	vm_page_t m;
1364	int i;
1365
1366	pmap = fs->map->pmap;
1367	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
1368		return;
1369
1370	entry = fs->entry;
1371
1372	if (addra < backward * PAGE_SIZE) {
1373		starta = entry->start;
1374	} else {
1375		starta = addra - backward * PAGE_SIZE;
1376		if (starta < entry->start)
1377			starta = entry->start;
1378	}
1379
1380	/*
1381	 * Generate the sequence of virtual addresses that are candidates for
1382	 * prefaulting in an outward spiral from the faulting virtual address,
1383	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
1384	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
1385	 * If the candidate address doesn't have a backing physical page, then
1386	 * the loop immediately terminates.
1387	 */
1388	for (i = 0; i < 2 * imax(backward, forward); i++) {
1389		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
1390		    PAGE_SIZE);
1391		if (addr > addra + forward * PAGE_SIZE)
1392			addr = 0;
1393
1394		if (addr < starta || addr >= entry->end)
1395			continue;
1396
1397		if (!pmap_is_prefaultable(pmap, addr))
1398			continue;
1399
1400		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1401		lobject = entry->object.vm_object;
1402		VM_OBJECT_RLOCK(lobject);
1403		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
1404		    lobject->type == OBJT_DEFAULT &&
1405		    (backing_object = lobject->backing_object) != NULL) {
1406			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
1407			    0, ("vm_fault_prefault: unaligned object offset"));
1408			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
1409			VM_OBJECT_RLOCK(backing_object);
1410			VM_OBJECT_RUNLOCK(lobject);
1411			lobject = backing_object;
1412		}
1413		if (m == NULL) {
1414			VM_OBJECT_RUNLOCK(lobject);
1415			break;
1416		}
1417		if (m->valid == VM_PAGE_BITS_ALL &&
1418		    (m->flags & PG_FICTITIOUS) == 0)
1419			pmap_enter_quick(pmap, addr, m, entry->protection);
1420		VM_OBJECT_RUNLOCK(lobject);
1421	}
1422}
1423
1424/*
1425 * Hold each of the physical pages that are mapped by the specified range of
1426 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
1427 * and allow the specified types of access, "prot".  If all of the implied
1428 * pages are successfully held, then the number of held pages is returned
1429 * together with pointers to those pages in the array "ma".  However, if any
1430 * of the pages cannot be held, -1 is returned.
1431 */
1432int
1433vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
1434    vm_prot_t prot, vm_page_t *ma, int max_count)
1435{
1436	vm_offset_t end, va;
1437	vm_page_t *mp;
1438	int count;
1439	boolean_t pmap_failed;
1440
1441	if (len == 0)
1442		return (0);
1443	end = round_page(addr + len);
1444	addr = trunc_page(addr);
1445
1446	/*
1447	 * Check for illegal addresses.
1448	 */
1449	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
1450		return (-1);
1451
1452	if (atop(end - addr) > max_count)
1453		panic("vm_fault_quick_hold_pages: count > max_count");
1454	count = atop(end - addr);
1455
1456	/*
1457	 * Most likely, the physical pages are resident in the pmap, so it is
1458	 * faster to try pmap_extract_and_hold() first.
1459	 */
1460	pmap_failed = FALSE;
1461	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
1462		*mp = pmap_extract_and_hold(map->pmap, va, prot);
1463		if (*mp == NULL)
1464			pmap_failed = TRUE;
1465		else if ((prot & VM_PROT_WRITE) != 0 &&
1466		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
1467			/*
1468			 * Explicitly dirty the physical page.  Otherwise, the
1469			 * caller's changes may go unnoticed because they are
1470			 * performed through an unmanaged mapping or by a DMA
1471			 * operation.
1472			 *
1473			 * The object lock is not held here.
1474			 * See vm_page_clear_dirty_mask().
1475			 */
1476			vm_page_dirty(*mp);
1477		}
1478	}
1479	if (pmap_failed) {
1480		/*
1481		 * One or more pages could not be held by the pmap.  Either no
1482		 * page was mapped at the specified virtual address or that
1483		 * mapping had insufficient permissions.  Attempt to fault in
1484		 * and hold these pages.
1485		 */
1486		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
1487			if (*mp == NULL && vm_fault_hold(map, va, prot,
1488			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
1489				goto error;
1490	}
1491	return (count);
1492error:
1493	for (mp = ma; mp < ma + count; mp++)
1494		if (*mp != NULL) {
1495			vm_page_lock(*mp);
1496			vm_page_unhold(*mp);
1497			vm_page_unlock(*mp);
1498		}
1499	return (-1);
1500}
1501
1502/*
1503 *	Routine:
1504 *		vm_fault_copy_entry
1505 *	Function:
1506 *		Create new shadow object backing dst_entry with private copy of
1507 *		all underlying pages. When src_entry is equal to dst_entry,
1508 *		function implements COW for wired-down map entry. Otherwise,
1509 *		it forks wired entry into dst_map.
1510 *
1511 *	In/out conditions:
1512 *		The source and destination maps must be locked for write.
1513 *		The source map entry must be wired down (or be a sharing map
1514 *		entry corresponding to a main map entry that is wired down).
1515 */
1516void
1517vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
1518    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
1519    vm_ooffset_t *fork_charge)
1520{
1521	vm_object_t backing_object, dst_object, object, src_object;
1522	vm_pindex_t dst_pindex, pindex, src_pindex;
1523	vm_prot_t access, prot;
1524	vm_offset_t vaddr;
1525	vm_page_t dst_m;
1526	vm_page_t src_m;
1527	boolean_t upgrade;
1528
1529#ifdef	lint
1530	src_map++;
1531#endif	/* lint */
1532
1533	upgrade = src_entry == dst_entry;
1534	access = prot = dst_entry->protection;
1535
1536	src_object = src_entry->object.vm_object;
1537	src_pindex = OFF_TO_IDX(src_entry->offset);
1538
1539	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1540		dst_object = src_object;
1541		vm_object_reference(dst_object);
1542	} else {
1543		/*
1544		 * Create the top-level object for the destination entry. (Doesn't
1545		 * actually shadow anything - we copy the pages directly.)
1546		 */
1547		dst_object = vm_object_allocate(OBJT_DEFAULT,
1548		    atop(dst_entry->end - dst_entry->start));
1549#if VM_NRESERVLEVEL > 0
1550		dst_object->flags |= OBJ_COLORED;
1551		dst_object->pg_color = atop(dst_entry->start);
1552#endif
1553	}
1554
1555	VM_OBJECT_WLOCK(dst_object);
1556	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1557	    ("vm_fault_copy_entry: vm_object not NULL"));
1558	if (src_object != dst_object) {
1559		dst_entry->object.vm_object = dst_object;
1560		dst_entry->offset = 0;
1561		dst_object->charge = dst_entry->end - dst_entry->start;
1562	}
1563	if (fork_charge != NULL) {
1564		KASSERT(dst_entry->cred == NULL,
1565		    ("vm_fault_copy_entry: leaked swp charge"));
1566		dst_object->cred = curthread->td_ucred;
1567		crhold(dst_object->cred);
1568		*fork_charge += dst_object->charge;
1569	} else if (dst_object->cred == NULL) {
1570		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
1571		    dst_entry));
1572		dst_object->cred = dst_entry->cred;
1573		dst_entry->cred = NULL;
1574	}
1575
1576	/*
1577	 * If not an upgrade, then enter the mappings in the pmap as
1578	 * read and/or execute accesses.  Otherwise, enter them as
1579	 * write accesses.
1580	 *
1581	 * A writeable large page mapping is only created if all of
1582	 * the constituent small page mappings are modified. Marking
1583	 * PTEs as modified on inception allows promotion to happen
1584	 * without taking potentially large number of soft faults.
1585	 */
1586	if (!upgrade)
1587		access &= ~VM_PROT_WRITE;
1588
1589	/*
1590	 * Loop through all of the virtual pages within the entry's
1591	 * range, copying each page from the source object to the
1592	 * destination object.  Since the source is wired, those pages
1593	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object does not share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				VM_WAIT;
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->valid = VM_PAGE_BITS_ALL;
			dst_m->dirty = VM_PAGE_BITS_ALL;
		} else {
			dst_m = src_m;
			if (vm_page_sleep_if_busy(dst_m, "fltupg"))
				goto again;
			vm_page_xbusy(dst_m);
			KASSERT(dst_m->valid == VM_PAGE_BITS_ALL,
			    ("invalid dst page %p", dst_m));
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap. If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 */
		pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
		    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_lock(src_m);
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_unlock(src_m);
				vm_page_lock(dst_m);
				vm_page_wire(dst_m);
				vm_page_unlock(dst_m);
			} else {
				KASSERT(dst_m->wire_count > 0,
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}