1/*-
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53 *  School of Computer Science
54 *  Carnegie Mellon University
55 *  Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61/*
62 *	Virtual memory mapping module.
63 */
64
65#include <sys/cdefs.h>
66__FBSDID("$FreeBSD$");
67
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/mutex.h>
74#include <sys/proc.h>
75#include <sys/vmmeter.h>
76#include <sys/mman.h>
77#include <sys/vnode.h>
78#include <sys/racct.h>
79#include <sys/resourcevar.h>
80#include <sys/rwlock.h>
81#include <sys/file.h>
82#include <sys/sysctl.h>
83#include <sys/sysent.h>
84#include <sys/shm.h>
85
86#include <vm/vm.h>
87#include <vm/vm_param.h>
88#include <vm/pmap.h>
89#include <vm/vm_map.h>
90#include <vm/vm_page.h>
91#include <vm/vm_object.h>
92#include <vm/vm_pager.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_extern.h>
95#include <vm/vnode_pager.h>
96#include <vm/swap_pager.h>
97#include <vm/uma.h>
98
99/*
100 *	Virtual memory maps provide for the mapping, protection,
101 *	and sharing of virtual memory objects.  In addition,
102 *	this module provides for an efficient virtual copy of
103 *	memory from one map to another.
104 *
105 *	Synchronization is required prior to most operations.
106 *
107 *	Maps consist of an ordered doubly-linked list of simple
108 *	entries; a self-adjusting binary search tree of these
109 *	entries is used to speed up lookups.
110 *
111 *	Since portions of maps are specified by start/end addresses,
112 *	which may not align with existing map entries, all
113 *	routines merely "clip" entries to these start/end values.
114 *	[That is, an entry is split into two, bordering at a
115 *	start or end value.]  Note that these clippings may not
116 *	always be necessary (as the two resulting entries are then
117 *	not changed); however, the clipping is done for convenience.
118 *
119 *	As mentioned above, virtual copy operations are performed
120 *	by copying VM object references from one map to
121 *	another, and then marking both regions as copy-on-write.
122 */
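
/*
 *	Worked example of clipping (illustrative, not from the original
 *	sources): changing the protection of [0x2000, 0x3000) within an
 *	entry that spans [0x1000, 0x5000) first splits that entry into
 *	[0x1000, 0x2000) and [0x2000, 0x5000), and then into
 *	[0x2000, 0x3000) and [0x3000, 0x5000), so that the operation
 *	applies to exactly one whole entry.
 */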
123
124static struct mtx map_sleep_mtx;
125static uma_zone_t mapentzone;
126static uma_zone_t kmapentzone;
127static uma_zone_t mapzone;
128static uma_zone_t vmspace_zone;
129static int vmspace_zinit(void *mem, int size, int flags);
130static int vm_map_zinit(void *mem, int size, int flags);
131static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
132    vm_offset_t max);
133static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
134static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
135static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
136static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
137    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
138#ifdef INVARIANTS
139static void vm_map_zdtor(void *mem, int size, void *arg);
140static void vmspace_zdtor(void *mem, int size, void *arg);
141#endif
142static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
143    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
144    int cow);
145static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
146    vm_offset_t failed_addr);
147
148#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
149    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
150     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
151
152/*
153 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
154 * stable.
155 */
156#define PROC_VMSPACE_LOCK(p) do { } while (0)
157#define PROC_VMSPACE_UNLOCK(p) do { } while (0)
158
159/*
160 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
161 *
162 *	Asserts that the starting and ending region
163 *	addresses fall within the valid range of the map.
164 */
165#define	VM_MAP_RANGE_CHECK(map, start, end)		\
166		{					\
167		if (start < vm_map_min(map))		\
168			start = vm_map_min(map);	\
169		if (end > vm_map_max(map))		\
170			end = vm_map_max(map);		\
171		if (start > end)			\
172			start = end;			\
173		}
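
/*
 *	Illustrative example (not from the original sources): for a map
 *	whose valid range is [0x1000, 0xf000), a request for
 *	[0x0, 0x10000) is clamped by VM_MAP_RANGE_CHECK to
 *	[0x1000, 0xf000), and a request lying entirely below
 *	vm_map_min(map) collapses to an empty range with start == end.
 */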
174
175/*
176 *	vm_map_startup:
177 *
178 *	Initialize the vm_map module.  Must be called before
179 *	any other vm_map routines.
180 *
181 *	Map and entry structures are allocated from the general
182 *	purpose memory pool with some exceptions:
183 *
184 *	- The kernel map and kmem submap are allocated statically.
185 *	- Kernel map entries are allocated out of a static pool.
186 *
187 *	These restrictions are necessary since malloc() uses the
188 *	maps and requires map entries.
189 */
190
191void
192vm_map_startup(void)
193{
194	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
195	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
196#ifdef INVARIANTS
197	    vm_map_zdtor,
198#else
199	    NULL,
200#endif
201	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
202	uma_prealloc(mapzone, MAX_KMAP);
203	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
204	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
205	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
206	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
207	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
208	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
209#ifdef INVARIANTS
210	    vmspace_zdtor,
211#else
212	    NULL,
213#endif
214	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
215}
216
217static int
218vmspace_zinit(void *mem, int size, int flags)
219{
220	struct vmspace *vm;
221
222	vm = (struct vmspace *)mem;
223
224	vm->vm_map.pmap = NULL;
225	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
226	PMAP_LOCK_INIT(vmspace_pmap(vm));
227	return (0);
228}
229
230static int
231vm_map_zinit(void *mem, int size, int flags)
232{
233	vm_map_t map;
234
235	map = (vm_map_t)mem;
236	memset(map, 0, sizeof(*map));
237	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
238	sx_init(&map->lock, "vm map (user)");
239	return (0);
240}
241
242#ifdef INVARIANTS
243static void
244vmspace_zdtor(void *mem, int size, void *arg)
245{
246	struct vmspace *vm;
247
248	vm = (struct vmspace *)mem;
249
250	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
251}
252static void
253vm_map_zdtor(void *mem, int size, void *arg)
254{
255	vm_map_t map;
256
257	map = (vm_map_t)mem;
258	KASSERT(map->nentries == 0,
259	    ("map %p nentries == %d on free.",
260	    map, map->nentries));
261	KASSERT(map->size == 0,
262	    ("map %p size == %lu on free.",
263	    map, (unsigned long)map->size));
264}
265#endif	/* INVARIANTS */
266
267/*
268 * Allocate a vmspace structure, including a vm_map and pmap,
269 * and initialize those structures.  The refcnt is set to 1.
270 *
271 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
272 */
273struct vmspace *
274vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
275{
276	struct vmspace *vm;
277
278	vm = uma_zalloc(vmspace_zone, M_WAITOK);
279
280	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
281
282	if (pinit == NULL)
283		pinit = &pmap_pinit;
284
285	if (!pinit(vmspace_pmap(vm))) {
286		uma_zfree(vmspace_zone, vm);
287		return (NULL);
288	}
289	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
290	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
291	vm->vm_refcnt = 1;
292	vm->vm_shm = NULL;
293	vm->vm_swrss = 0;
294	vm->vm_tsize = 0;
295	vm->vm_dsize = 0;
296	vm->vm_ssize = 0;
297	vm->vm_taddr = 0;
298	vm->vm_daddr = 0;
299	vm->vm_maxsaddr = 0;
300	return (vm);
301}
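
/*
 *	Example usage (illustrative sketch only; the bounds below are
 *	hypothetical): a caller such as the exec path creates a fresh
 *	address space with the default pmap initializer by passing a
 *	NULL 'pinit':
 *
 *		struct vmspace *newvm;
 *
 *		newvm = vmspace_alloc(sv_minuser, sv_maxuser, NULL);
 *		if (newvm == NULL)
 *			return (ENOMEM);
 *
 *	The returned vmspace starts with vm_refcnt == 1 and is released
 *	with vmspace_free().
 */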
302
303#ifdef RACCT
304static void
305vmspace_container_reset(struct proc *p)
306{
307
308	PROC_LOCK(p);
309	racct_set(p, RACCT_DATA, 0);
310	racct_set(p, RACCT_STACK, 0);
311	racct_set(p, RACCT_RSS, 0);
312	racct_set(p, RACCT_MEMLOCK, 0);
313	racct_set(p, RACCT_VMEM, 0);
314	PROC_UNLOCK(p);
315}
316#endif
317
318static inline void
319vmspace_dofree(struct vmspace *vm)
320{
321
322	CTR1(KTR_VM, "vmspace_free: %p", vm);
323
324	/*
325	 * Make sure any SysV shm is freed, it might not have been in
326	 * exit1().
327	 */
328	shmexit(vm);
329
330	/*
331	 * Lock the map, to wait out all other references to it.
332	 * Delete all of the mappings and pages they hold, then call
333	 * the pmap module to reclaim anything left.
334	 */
335	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
336	    vm->vm_map.max_offset);
337
338	pmap_release(vmspace_pmap(vm));
339	vm->vm_map.pmap = NULL;
340	uma_zfree(vmspace_zone, vm);
341}
342
343void
344vmspace_free(struct vmspace *vm)
345{
346
347	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
348	    "vmspace_free() called");
349
350	if (vm->vm_refcnt == 0)
351		panic("vmspace_free: attempt to free already freed vmspace");
352
353	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
354		vmspace_dofree(vm);
355}
356
357void
358vmspace_exitfree(struct proc *p)
359{
360	struct vmspace *vm;
361
362	PROC_VMSPACE_LOCK(p);
363	vm = p->p_vmspace;
364	p->p_vmspace = NULL;
365	PROC_VMSPACE_UNLOCK(p);
366	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
367	vmspace_free(vm);
368}
369
370void
371vmspace_exit(struct thread *td)
372{
373	int refcnt;
374	struct vmspace *vm;
375	struct proc *p;
376
377	/*
378	 * Release user portion of address space.
379	 * This releases references to vnodes,
380	 * which could cause I/O if the file has been unlinked.
381	 * Need to do this early enough that we can still sleep.
382	 *
383	 * The last exiting process to reach this point releases as
384	 * much of the environment as it can. vmspace_dofree() is the
385	 * slower fallback in case another process had a temporary
386	 * reference to the vmspace.
387	 */
388
389	p = td->td_proc;
390	vm = p->p_vmspace;
391	atomic_add_int(&vmspace0.vm_refcnt, 1);
392	do {
393		refcnt = vm->vm_refcnt;
394		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
395			/* Switch now since other proc might free vmspace */
396			PROC_VMSPACE_LOCK(p);
397			p->p_vmspace = &vmspace0;
398			PROC_VMSPACE_UNLOCK(p);
399			pmap_activate(td);
400		}
401	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
402	if (refcnt == 1) {
403		if (p->p_vmspace != vm) {
404			/* vmspace not yet freed, switch back */
405			PROC_VMSPACE_LOCK(p);
406			p->p_vmspace = vm;
407			PROC_VMSPACE_UNLOCK(p);
408			pmap_activate(td);
409		}
410		pmap_remove_pages(vmspace_pmap(vm));
411		/* Switch now since this proc will free vmspace */
412		PROC_VMSPACE_LOCK(p);
413		p->p_vmspace = &vmspace0;
414		PROC_VMSPACE_UNLOCK(p);
415		pmap_activate(td);
416		vmspace_dofree(vm);
417	}
418#ifdef RACCT
419	if (racct_enable)
420		vmspace_container_reset(p);
421#endif
422}
423
424/* Acquire reference to vmspace owned by another process. */
425
426struct vmspace *
427vmspace_acquire_ref(struct proc *p)
428{
429	struct vmspace *vm;
430	int refcnt;
431
432	PROC_VMSPACE_LOCK(p);
433	vm = p->p_vmspace;
434	if (vm == NULL) {
435		PROC_VMSPACE_UNLOCK(p);
436		return (NULL);
437	}
438	do {
439		refcnt = vm->vm_refcnt;
440		if (refcnt <= 0) { 	/* Avoid 0->1 transition */
441			PROC_VMSPACE_UNLOCK(p);
442			return (NULL);
443		}
444	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
445	if (vm != p->p_vmspace) {
446		PROC_VMSPACE_UNLOCK(p);
447		vmspace_free(vm);
448		return (NULL);
449	}
450	PROC_VMSPACE_UNLOCK(p);
451	return (vm);
452}
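
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): a caller that needs to inspect another process's map
 *	takes a reference, uses the map, and then drops the reference:
 *
 *		struct vmspace *vm;
 *
 *		vm = vmspace_acquire_ref(p);
 *		if (vm == NULL)
 *			return (ESRCH);
 *		vm_map_lock_read(&vm->vm_map);
 *		... walk the map entries ...
 *		vm_map_unlock_read(&vm->vm_map);
 *		vmspace_free(vm);
 */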
453
454/*
455 * Switch between vmspaces in an AIO kernel process.
456 *
457 * The AIO kernel processes switch to and from a user process's
458 * vmspace while performing an I/O operation on behalf of a user
459 * process.  The new vmspace is either the vmspace of a user process
460 * obtained from an active AIO request or the initial vmspace of the
461 * AIO kernel process (when it is idling).  Because user processes
462 * will block to drain any active AIO requests before proceeding in
463 * exit() or execve(), the vmspace reference count for these vmspaces
464 * can never be 0.  This allows for a much simpler implementation than
465 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
466 * processes hold an extra reference on their initial vmspace for the
467 * life of the process so that this guarantee is true for any vmspace
468 * passed as 'newvm'.
469 */
470void
471vmspace_switch_aio(struct vmspace *newvm)
472{
473	struct vmspace *oldvm;
474
475	/* XXX: Need some way to assert that this is an aio daemon. */
476
477	KASSERT(newvm->vm_refcnt > 0,
478	    ("vmspace_switch_aio: newvm unreferenced"));
479
480	oldvm = curproc->p_vmspace;
481	if (oldvm == newvm)
482		return;
483
484	/*
485	 * Point to the new address space and refer to it.
486	 */
487	curproc->p_vmspace = newvm;
488	atomic_add_int(&newvm->vm_refcnt, 1);
489
490	/* Activate the new mapping. */
491	pmap_activate(curthread);
492
493	/* Remove the daemon's reference to the old address space. */
494	KASSERT(oldvm->vm_refcnt > 1,
495	    ("vmspace_switch_aio: oldvm dropping last reference"));
496	vmspace_free(oldvm);
497}
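
/*
 *	Example usage (illustrative sketch; 'job' and 'myvm' are
 *	hypothetical locals): an AIO daemon borrows the user process's
 *	address space for the duration of one request and then switches
 *	back to its own vmspace:
 *
 *		vmspace_switch_aio(job->userproc->p_vmspace);
 *		... copyin()/copyout() on behalf of the user ...
 *		vmspace_switch_aio(myvm);
 */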
498
499void
500_vm_map_lock(vm_map_t map, const char *file, int line)
501{
502
503	if (map->system_map)
504		mtx_lock_flags_(&map->system_mtx, 0, file, line);
505	else
506		sx_xlock_(&map->lock, file, line);
507	map->timestamp++;
508}
509
510static void
511vm_map_process_deferred(void)
512{
513	struct thread *td;
514	vm_map_entry_t entry, next;
515	vm_object_t object;
516
517	td = curthread;
518	entry = td->td_map_def_user;
519	td->td_map_def_user = NULL;
520	while (entry != NULL) {
521		next = entry->next;
522		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
523			/*
524			 * Decrement the object's writemappings and
525			 * possibly the vnode's v_writecount.
526			 */
527			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
528			    ("Submap with writecount"));
529			object = entry->object.vm_object;
530			KASSERT(object != NULL, ("No object for writecount"));
531			vnode_pager_release_writecount(object, entry->start,
532			    entry->end);
533		}
534		vm_map_entry_deallocate(entry, FALSE);
535		entry = next;
536	}
537}
538
539void
540_vm_map_unlock(vm_map_t map, const char *file, int line)
541{
542
543	if (map->system_map)
544		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
545	else {
546		sx_xunlock_(&map->lock, file, line);
547		vm_map_process_deferred();
548	}
549}
550
551void
552_vm_map_lock_read(vm_map_t map, const char *file, int line)
553{
554
555	if (map->system_map)
556		mtx_lock_flags_(&map->system_mtx, 0, file, line);
557	else
558		sx_slock_(&map->lock, file, line);
559}
560
561void
562_vm_map_unlock_read(vm_map_t map, const char *file, int line)
563{
564
565	if (map->system_map)
566		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
567	else {
568		sx_sunlock_(&map->lock, file, line);
569		vm_map_process_deferred();
570	}
571}
572
573int
574_vm_map_trylock(vm_map_t map, const char *file, int line)
575{
576	int error;
577
578	error = map->system_map ?
579	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
580	    !sx_try_xlock_(&map->lock, file, line);
581	if (error == 0)
582		map->timestamp++;
583	return (error == 0);
584}
585
586int
587_vm_map_trylock_read(vm_map_t map, const char *file, int line)
588{
589	int error;
590
591	error = map->system_map ?
592	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
593	    !sx_try_slock_(&map->lock, file, line);
594	return (error == 0);
595}
596
597/*
598 *	_vm_map_lock_upgrade:	[ internal use only ]
599 *
600 *	Tries to upgrade a read (shared) lock on the specified map to a write
601 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
602 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
603 *	returned without a read or write lock held.
604 *
605 *	Requires that the map be read locked.
606 */
607int
608_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
609{
610	unsigned int last_timestamp;
611
612	if (map->system_map) {
613		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
614	} else {
615		if (!sx_try_upgrade_(&map->lock, file, line)) {
616			last_timestamp = map->timestamp;
617			sx_sunlock_(&map->lock, file, line);
618			vm_map_process_deferred();
619			/*
620			 * If the map's timestamp does not change while the
621			 * map is unlocked, then the upgrade succeeds.
622			 */
623			sx_xlock_(&map->lock, file, line);
624			if (last_timestamp != map->timestamp) {
625				sx_xunlock_(&map->lock, file, line);
626				return (1);
627			}
628		}
629	}
630	map->timestamp++;
631	return (0);
632}
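
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): a failed upgrade drops the lock entirely, so the caller
 *	must reacquire it and revalidate any state read under the
 *	shared lock before continuing:
 *
 *		vm_map_lock_read(map);
 *		...
 *		if (vm_map_lock_upgrade(map) != 0) {
 *			vm_map_lock(map);
 *			... revalidate; the map may have changed ...
 *		}
 *		... the map is now write locked ...
 *		vm_map_unlock(map);
 */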
633
634void
635_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
636{
637
638	if (map->system_map) {
639		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
640	} else
641		sx_downgrade_(&map->lock, file, line);
642}
643
644/*
645 *	vm_map_locked:
646 *
647 *	Returns a non-zero value if the caller holds a write (exclusive) lock
648 *	on the specified map and the value "0" otherwise.
649 */
650int
651vm_map_locked(vm_map_t map)
652{
653
654	if (map->system_map)
655		return (mtx_owned(&map->system_mtx));
656	else
657		return (sx_xlocked(&map->lock));
658}
659
660#ifdef INVARIANTS
661static void
662_vm_map_assert_locked(vm_map_t map, const char *file, int line)
663{
664
665	if (map->system_map)
666		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
667	else
668		sx_assert_(&map->lock, SA_XLOCKED, file, line);
669}
670
671#define	VM_MAP_ASSERT_LOCKED(map) \
672    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
673#else
674#define	VM_MAP_ASSERT_LOCKED(map)
675#endif
676
677/*
678 *	_vm_map_unlock_and_wait:
679 *
680 *	Atomically releases the lock on the specified map and puts the calling
681 *	thread to sleep.  The calling thread will remain asleep until either
682 *	vm_map_wakeup() is performed on the map or the specified timeout is
683 *	exceeded.
684 *
685 *	WARNING!  This function does not perform deferred deallocations of
686 *	objects and map	entries.  Therefore, the calling thread is expected to
687 *	reacquire the map lock after reawakening and later perform an ordinary
688 *	unlock operation, such as vm_map_unlock(), before completing its
689 *	operation on the map.
690 */
691int
692_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
693{
694
695	mtx_lock(&map_sleep_mtx);
696	if (map->system_map)
697		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
698	else
699		sx_xunlock_(&map->lock, file, line);
700	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
701	    timo));
702}
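
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): a thread waiting for another thread to finish with a
 *	range typically loops, reacquiring the lock after each sleep,
 *	and finishes with an ordinary unlock so that deferred
 *	deallocations are processed; the other thread is expected to
 *	call vm_map_wakeup() when it is done:
 *
 *		vm_map_lock(map);
 *		while (... the region is still in transition ...) {
 *			(void)vm_map_unlock_and_wait(map, 0);
 *			vm_map_lock(map);
 *		}
 *		...
 *		vm_map_unlock(map);
 */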
703
704/*
705 *	vm_map_wakeup:
706 *
707 *	Awaken any threads that have slept on the map using
708 *	vm_map_unlock_and_wait().
709 */
710void
711vm_map_wakeup(vm_map_t map)
712{
713
714	/*
715	 * Acquire and release map_sleep_mtx to prevent a wakeup()
716	 * from being performed (and lost) between the map unlock
717	 * and the msleep() in _vm_map_unlock_and_wait().
718	 */
719	mtx_lock(&map_sleep_mtx);
720	mtx_unlock(&map_sleep_mtx);
721	wakeup(&map->root);
722}
723
724void
725vm_map_busy(vm_map_t map)
726{
727
728	VM_MAP_ASSERT_LOCKED(map);
729	map->busy++;
730}
731
732void
733vm_map_unbusy(vm_map_t map)
734{
735
736	VM_MAP_ASSERT_LOCKED(map);
737	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
738	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
739		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
740		wakeup(&map->busy);
741	}
742}
743
744void
745vm_map_wait_busy(vm_map_t map)
746{
747
748	VM_MAP_ASSERT_LOCKED(map);
749	while (map->busy) {
750		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
751		if (map->system_map)
752			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
753		else
754			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
755	}
756	map->timestamp++;
757}
758
759long
760vmspace_resident_count(struct vmspace *vmspace)
761{
762	return pmap_resident_count(vmspace_pmap(vmspace));
763}
764
765/*
766 *	vm_map_create:
767 *
768 *	Creates and returns a new empty VM map with
769 *	the given physical map structure, and having
770 *	the given lower and upper address bounds.
771 */
772vm_map_t
773vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
774{
775	vm_map_t result;
776
777	result = uma_zalloc(mapzone, M_WAITOK);
778	CTR1(KTR_VM, "vm_map_create: %p", result);
779	_vm_map_init(result, pmap, min, max);
780	return (result);
781}
782
783/*
784 * Initialize an existing vm_map structure
785 * such as that in the vmspace structure.
786 */
787static void
788_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
789{
790
791	map->header.next = map->header.prev = &map->header;
792	map->needs_wakeup = FALSE;
793	map->system_map = 0;
794	map->pmap = pmap;
795	map->min_offset = min;
796	map->max_offset = max;
797	map->flags = 0;
798	map->root = NULL;
799	map->timestamp = 0;
800	map->busy = 0;
801}
802
803void
804vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
805{
806
807	_vm_map_init(map, pmap, min, max);
808	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
809	sx_init(&map->lock, "user map");
810}
811
812/*
813 *	vm_map_entry_dispose:	[ internal use only ]
814 *
815 *	Inverse of vm_map_entry_create.
816 */
817static void
818vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
819{
820	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
821}
822
823/*
824 *	vm_map_entry_create:	[ internal use only ]
825 *
826 *	Allocates a VM map entry for insertion.
827 *	No entry fields are filled in.
828 */
829static vm_map_entry_t
830vm_map_entry_create(vm_map_t map)
831{
832	vm_map_entry_t new_entry;
833
834	if (map->system_map)
835		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
836	else
837		new_entry = uma_zalloc(mapentzone, M_WAITOK);
838	if (new_entry == NULL)
839		panic("vm_map_entry_create: kernel resources exhausted");
840	return (new_entry);
841}
842
843/*
844 *	vm_map_entry_set_behavior:
845 *
846 *	Set the expected access behavior, either normal, random, or
847 *	sequential.
848 */
849static inline void
850vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
851{
852	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
853	    (behavior & MAP_ENTRY_BEHAV_MASK);
854}
855
856/*
857 *	vm_map_entry_set_max_free:
858 *
859 *	Set the max_free field in a vm_map_entry.
860 */
861static inline void
862vm_map_entry_set_max_free(vm_map_entry_t entry)
863{
864
865	entry->max_free = entry->adj_free;
866	if (entry->left != NULL && entry->left->max_free > entry->max_free)
867		entry->max_free = entry->left->max_free;
868	if (entry->right != NULL && entry->right->max_free > entry->max_free)
869		entry->max_free = entry->right->max_free;
870}
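
/*
 *	Worked example (illustrative): if an entry ends at 0x4000 and
 *	its successor in the address-ordered list starts at 0x7000,
 *	then adj_free is 0x3000.  max_free for an entry is the largest
 *	adj_free found in the subtree rooted at that entry, which is
 *	what lets vm_map_findspace() prune whole subtrees that cannot
 *	satisfy a request.
 */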
871
872/*
873 *	vm_map_entry_splay:
874 *
875 *	The Sleator and Tarjan top-down splay algorithm with the
876 *	following variation.  Max_free must be computed bottom-up, so
877 *	on the downward pass, maintain the left and right spines in
878 *	reverse order.  Then, make a second pass up each side to fix
879 *	the pointers and compute max_free.  The time bound is O(log n)
880 *	amortized.
881 *
882 *	The new root is the vm_map_entry containing "addr", or else an
883 *	adjacent entry (lower or higher) if addr is not in the tree.
884 *
885 *	The map must be locked, and leaves it so.
886 *
887 *	Returns: the new root.
888 */
889static vm_map_entry_t
890vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
891{
892	vm_map_entry_t llist, rlist;
893	vm_map_entry_t ltree, rtree;
894	vm_map_entry_t y;
895
896	/* Special case of empty tree. */
897	if (root == NULL)
898		return (root);
899
900	/*
901	 * Pass One: Splay down the tree until we find addr or a NULL
902	 * pointer where addr would go.  llist and rlist are the two
903	 * sides in reverse order (bottom-up), with llist linked by
904	 * the right pointer and rlist linked by the left pointer in
905	 * the vm_map_entry.  Wait until Pass Two to set max_free on
906	 * the two spines.
907	 */
908	llist = NULL;
909	rlist = NULL;
910	for (;;) {
911		/* root is never NULL in here. */
912		if (addr < root->start) {
913			y = root->left;
914			if (y == NULL)
915				break;
916			if (addr < y->start && y->left != NULL) {
917				/* Rotate right and put y on rlist. */
918				root->left = y->right;
919				y->right = root;
920				vm_map_entry_set_max_free(root);
921				root = y->left;
922				y->left = rlist;
923				rlist = y;
924			} else {
925				/* Put root on rlist. */
926				root->left = rlist;
927				rlist = root;
928				root = y;
929			}
930		} else if (addr >= root->end) {
931			y = root->right;
932			if (y == NULL)
933				break;
934			if (addr >= y->end && y->right != NULL) {
935				/* Rotate left and put y on llist. */
936				root->right = y->left;
937				y->left = root;
938				vm_map_entry_set_max_free(root);
939				root = y->right;
940				y->right = llist;
941				llist = y;
942			} else {
943				/* Put root on llist. */
944				root->right = llist;
945				llist = root;
946				root = y;
947			}
948		} else
949			break;
950	}
951
952	/*
953	 * Pass Two: Walk back up the two spines, flip the pointers
954	 * and set max_free.  The subtrees of the root go at the
955	 * bottom of llist and rlist.
956	 */
957	ltree = root->left;
958	while (llist != NULL) {
959		y = llist->right;
960		llist->right = ltree;
961		vm_map_entry_set_max_free(llist);
962		ltree = llist;
963		llist = y;
964	}
965	rtree = root->right;
966	while (rlist != NULL) {
967		y = rlist->left;
968		rlist->left = rtree;
969		vm_map_entry_set_max_free(rlist);
970		rtree = rlist;
971		rlist = y;
972	}
973
974	/*
975	 * Final assembly: add ltree and rtree as subtrees of root.
976	 */
977	root->left = ltree;
978	root->right = rtree;
979	vm_map_entry_set_max_free(root);
980
981	return (root);
982}
983
984/*
985 *	vm_map_entry_{un,}link:
986 *
987 *	Insert/remove entries from maps.
988 */
989static void
990vm_map_entry_link(vm_map_t map,
991		  vm_map_entry_t after_where,
992		  vm_map_entry_t entry)
993{
994
995	CTR4(KTR_VM,
996	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
997	    map->nentries, entry, after_where);
998	VM_MAP_ASSERT_LOCKED(map);
999	KASSERT(after_where == &map->header ||
1000	    after_where->end <= entry->start,
1001	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
1002	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
1003	KASSERT(after_where->next == &map->header ||
1004	    entry->end <= after_where->next->start,
1005	    ("vm_map_entry_link: new end %jx next start %jx overlap",
1006	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));
1007
1008	map->nentries++;
1009	entry->prev = after_where;
1010	entry->next = after_where->next;
1011	entry->next->prev = entry;
1012	after_where->next = entry;
1013
1014	if (after_where != &map->header) {
1015		if (after_where != map->root)
1016			vm_map_entry_splay(after_where->start, map->root);
1017		entry->right = after_where->right;
1018		entry->left = after_where;
1019		after_where->right = NULL;
1020		after_where->adj_free = entry->start - after_where->end;
1021		vm_map_entry_set_max_free(after_where);
1022	} else {
1023		entry->right = map->root;
1024		entry->left = NULL;
1025	}
1026	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1027	    entry->next->start) - entry->end;
1028	vm_map_entry_set_max_free(entry);
1029	map->root = entry;
1030}
1031
1032static void
1033vm_map_entry_unlink(vm_map_t map,
1034		    vm_map_entry_t entry)
1035{
1036	vm_map_entry_t next, prev, root;
1037
1038	VM_MAP_ASSERT_LOCKED(map);
1039	if (entry != map->root)
1040		vm_map_entry_splay(entry->start, map->root);
1041	if (entry->left == NULL)
1042		root = entry->right;
1043	else {
1044		root = vm_map_entry_splay(entry->start, entry->left);
1045		root->right = entry->right;
1046		root->adj_free = (entry->next == &map->header ? map->max_offset :
1047		    entry->next->start) - root->end;
1048		vm_map_entry_set_max_free(root);
1049	}
1050	map->root = root;
1051
1052	prev = entry->prev;
1053	next = entry->next;
1054	next->prev = prev;
1055	prev->next = next;
1056	map->nentries--;
1057	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1058	    map->nentries, entry);
1059}
1060
1061/*
1062 *	vm_map_entry_resize_free:
1063 *
1064 *	Recompute the amount of free space following a vm_map_entry
1065 *	and propagate that value up the tree.  Call this function after
1066 *	resizing a map entry in-place, that is, without a call to
1067 *	vm_map_entry_link() or _unlink().
1068 *
1069 *	The map must be locked, and leaves it so.
1070 */
1071static void
1072vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1073{
1074
1075	/*
1076	 * Using splay trees without parent pointers, propagating
1077	 * max_free up the tree is done by moving the entry to the
1078	 * root and making the change there.
1079	 */
1080	if (entry != map->root)
1081		map->root = vm_map_entry_splay(entry->start, map->root);
1082
1083	entry->adj_free = (entry->next == &map->header ? map->max_offset :
1084	    entry->next->start) - entry->end;
1085	vm_map_entry_set_max_free(entry);
1086}
1087
1088/*
1089 *	vm_map_lookup_entry:	[ internal use only ]
1090 *
1091 *	Finds the map entry containing (or
1092 *	immediately preceding) the specified address
1093 *	in the given map; the entry is returned
1094 *	in the "entry" parameter.  The boolean
1095 *	result indicates whether the address is
1096 *	actually contained in the map.
1097 */
1098boolean_t
1099vm_map_lookup_entry(
1100	vm_map_t map,
1101	vm_offset_t address,
1102	vm_map_entry_t *entry)	/* OUT */
1103{
1104	vm_map_entry_t cur;
1105	boolean_t locked;
1106
1107	/*
1108	 * If the map is empty, then the map entry immediately preceding
1109	 * "address" is the map's header.
1110	 */
1111	cur = map->root;
1112	if (cur == NULL)
1113		*entry = &map->header;
1114	else if (address >= cur->start && cur->end > address) {
1115		*entry = cur;
1116		return (TRUE);
1117	} else if ((locked = vm_map_locked(map)) ||
1118	    sx_try_upgrade(&map->lock)) {
1119		/*
1120		 * Splay requires a write lock on the map.  However, it only
1121		 * restructures the binary search tree; it does not otherwise
1122		 * change the map.  Thus, the map's timestamp need not change
1123		 * on a temporary upgrade.
1124		 */
1125		map->root = cur = vm_map_entry_splay(address, cur);
1126		if (!locked)
1127			sx_downgrade(&map->lock);
1128
1129		/*
1130		 * If "address" is contained within a map entry, the new root
1131		 * is that map entry.  Otherwise, the new root is a map entry
1132		 * immediately before or after "address".
1133		 */
1134		if (address >= cur->start) {
1135			*entry = cur;
1136			if (cur->end > address)
1137				return (TRUE);
1138		} else
1139			*entry = cur->prev;
1140	} else
1141		/*
1142		 * Since the map is only locked for read access, perform a
1143		 * standard binary search tree lookup for "address".
1144		 */
1145		for (;;) {
1146			if (address < cur->start) {
1147				if (cur->left == NULL) {
1148					*entry = cur->prev;
1149					break;
1150				}
1151				cur = cur->left;
1152			} else if (cur->end > address) {
1153				*entry = cur;
1154				return (TRUE);
1155			} else {
1156				if (cur->right == NULL) {
1157					*entry = cur;
1158					break;
1159				}
1160				cur = cur->right;
1161			}
1162		}
1163	return (FALSE);
1164}
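
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code):
 *
 *		vm_map_entry_t entry;
 *
 *		vm_map_lock_read(map);
 *		if (vm_map_lookup_entry(map, addr, &entry)) {
 *			... addr lies within [entry->start, entry->end) ...
 *		} else {
 *			... entry precedes addr; it may be &map->header ...
 *		}
 *		vm_map_unlock_read(map);
 */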
1165
1166/*
1167 *	vm_map_insert:
1168 *
1169 *	Inserts the given whole VM object into the target
1170 *	map at the specified address range.  The object's
1171 *	size should match that of the address range.
1172 *
1173 *	Requires that the map be locked, and leaves it so.
1174 *
1175 *	If object is non-NULL, ref count must be bumped by caller
1176 *	prior to making call to account for the new entry.
1177 */
1178int
1179vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1180    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1181{
1182	vm_map_entry_t new_entry, prev_entry, temp_entry;
1183	vm_eflags_t protoeflags;
1184	struct ucred *cred;
1185	vm_inherit_t inheritance;
1186
1187	VM_MAP_ASSERT_LOCKED(map);
1188	KASSERT((object != kmem_object && object != kernel_object) ||
1189	    (cow & MAP_COPY_ON_WRITE) == 0,
1190	    ("vm_map_insert: kmem or kernel object and COW"));
1191	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
1192	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1193
1194	/*
1195	 * Check that the start and end points are not bogus.
1196	 */
1197	if ((start < map->min_offset) || (end > map->max_offset) ||
1198	    (start >= end))
1199		return (KERN_INVALID_ADDRESS);
1200
1201	/*
1202	 * Find the entry prior to the proposed starting address; if it's part
1203	 * of an existing entry, this range is bogus.
1204	 */
1205	if (vm_map_lookup_entry(map, start, &temp_entry))
1206		return (KERN_NO_SPACE);
1207
1208	prev_entry = temp_entry;
1209
1210	/*
1211	 * Assert that the next entry doesn't overlap the end point.
1212	 */
1213	if ((prev_entry->next != &map->header) &&
1214	    (prev_entry->next->start < end))
1215		return (KERN_NO_SPACE);
1216
1217	protoeflags = 0;
1218	if (cow & MAP_COPY_ON_WRITE)
1219		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1220	if (cow & MAP_NOFAULT)
1221		protoeflags |= MAP_ENTRY_NOFAULT;
1222	if (cow & MAP_DISABLE_SYNCER)
1223		protoeflags |= MAP_ENTRY_NOSYNC;
1224	if (cow & MAP_DISABLE_COREDUMP)
1225		protoeflags |= MAP_ENTRY_NOCOREDUMP;
1226	if (cow & MAP_STACK_GROWS_DOWN)
1227		protoeflags |= MAP_ENTRY_GROWS_DOWN;
1228	if (cow & MAP_STACK_GROWS_UP)
1229		protoeflags |= MAP_ENTRY_GROWS_UP;
1230	if (cow & MAP_VN_WRITECOUNT)
1231		protoeflags |= MAP_ENTRY_VN_WRITECNT;
1232	if (cow & MAP_INHERIT_SHARE)
1233		inheritance = VM_INHERIT_SHARE;
1234	else
1235		inheritance = VM_INHERIT_DEFAULT;
1236
1237	cred = NULL;
1238	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
1239		goto charged;
1240	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1241	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1242		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1243			return (KERN_RESOURCE_SHORTAGE);
1244		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
1245		    object->cred == NULL,
1246		    ("OVERCOMMIT: vm_map_insert o %p", object));
1247		cred = curthread->td_ucred;
1248	}
1249
1250charged:
1251	/* Expand the kernel pmap, if necessary. */
1252	if (map == kernel_map && end > kernel_vm_end)
1253		pmap_growkernel(end);
1254	if (object != NULL) {
1255		/*
1256		 * OBJ_ONEMAPPING must be cleared unless this mapping
1257		 * is trivially proven to be the only mapping for any
1258		 * of the object's pages.  (Object granularity
1259		 * reference counting is insufficient to recognize
1260		 * aliases with precision.)
1261		 */
1262		VM_OBJECT_WLOCK(object);
1263		if (object->ref_count > 1 || object->shadow_count != 0)
1264			vm_object_clear_flag(object, OBJ_ONEMAPPING);
1265		VM_OBJECT_WUNLOCK(object);
1266	}
1267	else if ((prev_entry != &map->header) &&
1268		 (prev_entry->eflags == protoeflags) &&
1269		 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
1270		 (prev_entry->end == start) &&
1271		 (prev_entry->wired_count == 0) &&
1272		 (prev_entry->cred == cred ||
1273		  (prev_entry->object.vm_object != NULL &&
1274		   (prev_entry->object.vm_object->cred == cred))) &&
1275		   vm_object_coalesce(prev_entry->object.vm_object,
1276		       prev_entry->offset,
1277		       (vm_size_t)(prev_entry->end - prev_entry->start),
1278		       (vm_size_t)(end - prev_entry->end), cred != NULL &&
1279		       (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1280		/*
1281		 * We were able to extend the object.  Determine if we
1282		 * can extend the previous map entry to include the
1283		 * new range as well.
1284		 */
1285		if ((prev_entry->inheritance == inheritance) &&
1286		    (prev_entry->protection == prot) &&
1287		    (prev_entry->max_protection == max)) {
1288			map->size += (end - prev_entry->end);
1289			prev_entry->end = end;
1290			vm_map_entry_resize_free(map, prev_entry);
1291			vm_map_simplify_entry(map, prev_entry);
1292			return (KERN_SUCCESS);
1293		}
1294
1295		/*
1296		 * If we can extend the object but cannot extend the
1297		 * map entry, we have to create a new map entry.  We
1298		 * must bump the ref count on the extended object to
1299		 * account for it.  object may be NULL.
1300		 */
1301		object = prev_entry->object.vm_object;
1302		offset = prev_entry->offset +
1303			(prev_entry->end - prev_entry->start);
1304		vm_object_reference(object);
1305		if (cred != NULL && object != NULL && object->cred != NULL &&
1306		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1307			/* Object already accounts for this uid. */
1308			cred = NULL;
1309		}
1310	}
1311	if (cred != NULL)
1312		crhold(cred);
1313
1314	/*
1315	 * Create a new entry
1316	 */
1317	new_entry = vm_map_entry_create(map);
1318	new_entry->start = start;
1319	new_entry->end = end;
1320	new_entry->cred = NULL;
1321
1322	new_entry->eflags = protoeflags;
1323	new_entry->object.vm_object = object;
1324	new_entry->offset = offset;
1325	new_entry->avail_ssize = 0;
1326
1327	new_entry->inheritance = inheritance;
1328	new_entry->protection = prot;
1329	new_entry->max_protection = max;
1330	new_entry->wired_count = 0;
1331	new_entry->wiring_thread = NULL;
1332	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1333	new_entry->next_read = start;
1334
1335	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1336	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
1337	new_entry->cred = cred;
1338
1339	/*
1340	 * Insert the new entry into the list
1341	 */
1342	vm_map_entry_link(map, prev_entry, new_entry);
1343	map->size += new_entry->end - new_entry->start;
1344
1345	/*
1346	 * Try to coalesce the new entry with both the previous and next
1347	 * entries in the list.  Previously, we only attempted to coalesce
1348	 * with the previous entry when object is NULL.  Here, we handle the
1349	 * other cases, which are less common.
1350	 */
1351	vm_map_simplify_entry(map, new_entry);
1352
1353	if (cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) {
1354		vm_map_pmap_enter(map, start, prot,
1355				    object, OFF_TO_IDX(offset), end - start,
1356				    cow & MAP_PREFAULT_PARTIAL);
1357	}
1358
1359	return (KERN_SUCCESS);
1360}
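
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): since vm_map_insert() consumes a reference on a non-NULL
 *	object, a typical caller bumps the reference count first and
 *	releases it again if the insertion fails:
 *
 *		vm_object_reference(object);
 *		vm_map_lock(map);
 *		rv = vm_map_insert(map, object, offset, start, end,
 *		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 *		vm_map_unlock(map);
 *		if (rv != KERN_SUCCESS)
 *			vm_object_deallocate(object);
 */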
1361
1362/*
1363 *	vm_map_findspace:
1364 *
1365 *	Find the first fit (lowest VM address) for "length" free bytes
1366 *	beginning at address >= start in the given map.
1367 *
1368 *	In a vm_map_entry, "adj_free" is the amount of free space
1369 *	adjacent (higher address) to this entry, and "max_free" is the
1370 *	maximum amount of contiguous free space in its subtree.  This
1371 *	allows finding a free region in one path down the tree, so
1372 *	O(log n) amortized with splay trees.
1373 *
1374 *	The map must be locked, and leaves it so.
1375 *
1376 *	Returns: 0 on success, and starting address in *addr,
1377 *		 1 if insufficient space.
1378 */
1379int
1380vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
1381    vm_offset_t *addr)	/* OUT */
1382{
1383	vm_map_entry_t entry;
1384	vm_offset_t st;
1385
1386	/*
1387	 * Request must fit within min/max VM address and must avoid
1388	 * address wrap.
1389	 */
1390	if (start < map->min_offset)
1391		start = map->min_offset;
1392	if (start + length > map->max_offset || start + length < start)
1393		return (1);
1394
1395	/* Empty tree means wide open address space. */
1396	if (map->root == NULL) {
1397		*addr = start;
1398		return (0);
1399	}
1400
1401	/*
1402	 * After splay, if start comes before root node, then there
1403	 * must be a gap from start to the root.
1404	 */
1405	map->root = vm_map_entry_splay(start, map->root);
1406	if (start + length <= map->root->start) {
1407		*addr = start;
1408		return (0);
1409	}
1410
1411	/*
1412	 * Root is the last node that might begin its gap before
1413	 * start, and this is the last comparison where address
1414	 * wrap might be a problem.
1415	 */
1416	st = (start > map->root->end) ? start : map->root->end;
1417	if (length <= map->root->end + map->root->adj_free - st) {
1418		*addr = st;
1419		return (0);
1420	}
1421
1422	/* With max_free, can immediately tell if no solution. */
1423	entry = map->root->right;
1424	if (entry == NULL || length > entry->max_free)
1425		return (1);
1426
1427	/*
1428	 * Search the right subtree in the order: left subtree, root,
1429	 * right subtree (first fit).  The previous splay implies that
1430	 * all regions in the right subtree have addresses > start.
1431	 */
1432	while (entry != NULL) {
1433		if (entry->left != NULL && entry->left->max_free >= length)
1434			entry = entry->left;
1435		else if (entry->adj_free >= length) {
1436			*addr = entry->end;
1437			return (0);
1438		} else
1439			entry = entry->right;
1440	}
1441
1442	/* Can't get here, so panic if we do. */
1443	panic("vm_map_findspace: max_free corrupt");
1444}
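
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): find a first-fit hole and immediately claim it while the
 *	map remains write locked, which is essentially what
 *	vm_map_find() does below:
 *
 *		vm_map_lock(map);
 *		if (vm_map_findspace(map, vm_map_min(map), length, &addr) == 0)
 *			rv = vm_map_insert(map, NULL, 0, addr, addr + length,
 *			    prot, max, 0);
 *		vm_map_unlock(map);
 */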
1445
1446int
1447vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1448    vm_offset_t start, vm_size_t length, vm_prot_t prot,
1449    vm_prot_t max, int cow)
1450{
1451	vm_offset_t end;
1452	int result;
1453
1454	end = start + length;
1455	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1456	    object == NULL,
1457	    ("vm_map_fixed: non-NULL backing object for stack"));
1458	vm_map_lock(map);
1459	VM_MAP_RANGE_CHECK(map, start, end);
1460	if ((cow & MAP_CHECK_EXCL) == 0)
1461		vm_map_delete(map, start, end);
1462	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1463		result = vm_map_stack_locked(map, start, length, sgrowsiz,
1464		    prot, max, cow);
1465	} else {
1466		result = vm_map_insert(map, object, offset, start, end,
1467		    prot, max, cow);
1468	}
1469	vm_map_unlock(map);
1470	return (result);
1471}
1472
1473/*
1474 *	vm_map_find finds an unallocated region in the target address
1475 *	map with the given length.  The search is defined to be
1476 *	first-fit from the specified address; the region found is
1477 *	returned in the same parameter.
1478 *
1479 *	If object is non-NULL, ref count must be bumped by caller
1480 *	prior to making call to account for the new entry.
1481 */
1482int
1483vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1484	    vm_offset_t *addr,	/* IN/OUT */
1485	    vm_size_t length, vm_offset_t max_addr, int find_space,
1486	    vm_prot_t prot, vm_prot_t max, int cow)
1487{
1488	vm_offset_t alignment, initial_addr, start;
1489	int result;
1490
1491	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1492	    object == NULL,
1493	    ("vm_map_find: non-NULL backing object for stack"));
1494	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1495	    (object->flags & OBJ_COLORED) == 0))
1496		find_space = VMFS_ANY_SPACE;
1497	if (find_space >> 8 != 0) {
1498		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1499		alignment = (vm_offset_t)1 << (find_space >> 8);
1500	} else
1501		alignment = 0;
1502	initial_addr = *addr;
1503again:
1504	start = initial_addr;
1505	vm_map_lock(map);
1506	do {
1507		if (find_space != VMFS_NO_SPACE) {
1508			if (vm_map_findspace(map, start, length, addr) ||
1509			    (max_addr != 0 && *addr + length > max_addr)) {
1510				vm_map_unlock(map);
1511				if (find_space == VMFS_OPTIMAL_SPACE) {
1512					find_space = VMFS_ANY_SPACE;
1513					goto again;
1514				}
1515				return (KERN_NO_SPACE);
1516			}
1517			switch (find_space) {
1518			case VMFS_SUPER_SPACE:
1519			case VMFS_OPTIMAL_SPACE:
1520				pmap_align_superpage(object, offset, addr,
1521				    length);
1522				break;
1523			case VMFS_ANY_SPACE:
1524				break;
1525			default:
1526				if ((*addr & (alignment - 1)) != 0) {
1527					*addr &= ~(alignment - 1);
1528					*addr += alignment;
1529				}
1530				break;
1531			}
1532
1533			start = *addr;
1534		}
1535		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1536			result = vm_map_stack_locked(map, start, length,
1537			    sgrowsiz, prot, max, cow);
1538		} else {
1539			result = vm_map_insert(map, object, offset, start,
1540			    start + length, prot, max, cow);
1541		}
1542	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
1543	    find_space != VMFS_ANY_SPACE);
1544	vm_map_unlock(map);
1545	return (result);
1546}
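
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): map 'size' bytes of 'object' at the first suitable
 *	address at or above the map minimum, preferring superpage
 *	alignment when the object supports it:
 *
 *		addr = vm_map_min(map);
 *		rv = vm_map_find(map, object, 0, &addr, size, 0,
 *		    VMFS_OPTIMAL_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *		if (rv != KERN_SUCCESS)
 *			vm_object_deallocate(object);
 *
 *	As with vm_map_insert(), the caller supplies the object
 *	reference that the new map entry consumes.
 */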
1547
1548/*
1549 *	vm_map_simplify_entry:
1550 *
1551 *	Simplify the given map entry by merging with either neighbor.  This
1552 *	routine also has the ability to merge with both neighbors.
1553 *
1554 *	The map must be locked.
1555 *
1556 *	This routine guarantees that the passed entry remains valid (though
1557 *	possibly extended).  When merging, this routine may delete one or
1558 *	both neighbors.
1559 */
1560void
1561vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
1562{
1563	vm_map_entry_t next, prev;
1564	vm_size_t prevsize, esize;
1565
1566	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
1567	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
1568		return;
1569
1570	prev = entry->prev;
1571	if (prev != &map->header) {
1572		prevsize = prev->end - prev->start;
1573		if ( (prev->end == entry->start) &&
1574		     (prev->object.vm_object == entry->object.vm_object) &&
1575		     (!prev->object.vm_object ||
1576			(prev->offset + prevsize == entry->offset)) &&
1577		     (prev->eflags == entry->eflags) &&
1578		     (prev->protection == entry->protection) &&
1579		     (prev->max_protection == entry->max_protection) &&
1580		     (prev->inheritance == entry->inheritance) &&
1581		     (prev->wired_count == entry->wired_count) &&
1582		     (prev->cred == entry->cred)) {
1583			vm_map_entry_unlink(map, prev);
1584			entry->start = prev->start;
1585			entry->offset = prev->offset;
1586			if (entry->prev != &map->header)
1587				vm_map_entry_resize_free(map, entry->prev);
1588
1589			/*
1590			 * If the backing object is a vnode object,
1591			 * vm_object_deallocate() calls vrele().
1592			 * However, vrele() does not lock the vnode
1593			 * because the vnode has additional
1594			 * references.  Thus, the map lock can be kept
1595			 * without causing a lock-order reversal with
1596			 * the vnode lock.
1597			 *
1598			 * Since we count the number of virtual page
1599			 * mappings in object->un_pager.vnp.writemappings,
1600			 * the writemappings value should not be adjusted
1601			 * when the entry is disposed of.
1602			 */
1603			if (prev->object.vm_object)
1604				vm_object_deallocate(prev->object.vm_object);
1605			if (prev->cred != NULL)
1606				crfree(prev->cred);
1607			vm_map_entry_dispose(map, prev);
1608		}
1609	}
1610
1611	next = entry->next;
1612	if (next != &map->header) {
1613		esize = entry->end - entry->start;
1614		if ((entry->end == next->start) &&
1615		    (next->object.vm_object == entry->object.vm_object) &&
1616		     (!entry->object.vm_object ||
1617			(entry->offset + esize == next->offset)) &&
1618		    (next->eflags == entry->eflags) &&
1619		    (next->protection == entry->protection) &&
1620		    (next->max_protection == entry->max_protection) &&
1621		    (next->inheritance == entry->inheritance) &&
1622		    (next->wired_count == entry->wired_count) &&
1623		    (next->cred == entry->cred)) {
1624			vm_map_entry_unlink(map, next);
1625			entry->end = next->end;
1626			vm_map_entry_resize_free(map, entry);
1627
1628			/*
1629			 * See comment above.
1630			 */
1631			if (next->object.vm_object)
1632				vm_object_deallocate(next->object.vm_object);
1633			if (next->cred != NULL)
1634				crfree(next->cred);
1635			vm_map_entry_dispose(map, next);
1636		}
1637	}
1638}
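
/*
 *	Worked example (illustrative): two adjacent anonymous entries
 *	covering [A, B) and [B, C) with identical protection,
 *	inheritance, wiring, credentials, and a shared backing object
 *	at contiguous offsets are merged here into a single entry
 *	covering [A, C); the redundant entry is unlinked and disposed.
 */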
1639/*
1640 *	vm_map_clip_start:	[ internal use only ]
1641 *
1642 *	Asserts that the given entry begins at or after
1643 *	the specified address; if necessary,
1644 *	it splits the entry into two.
1645 */
1646#define vm_map_clip_start(map, entry, startaddr) \
1647{ \
1648	if (startaddr > entry->start) \
1649		_vm_map_clip_start(map, entry, startaddr); \
1650}
1651
1652/*
1653 *	This routine is called only when it is known that
1654 *	the entry must be split.
1655 */
1656static void
1657_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
1658{
1659	vm_map_entry_t new_entry;
1660
1661	VM_MAP_ASSERT_LOCKED(map);
1662
1663	/*
1664	 * Split off the front portion -- note that we must insert the new
1665	 * entry BEFORE this one, so that this entry has the specified
1666	 * starting address.
1667	 */
1668	vm_map_simplify_entry(map, entry);
1669
1670	/*
1671	 * If there is no object backing this entry, we might as well create
1672	 * one now.  If we defer it, an object can get created after the map
1673	 * is clipped, and individual objects will be created for the split-up
1674	 * map.  This is a bit of a hack, but is also about the best place to
1675	 * put this improvement.
1676	 */
1677	if (entry->object.vm_object == NULL && !map->system_map) {
1678		vm_object_t object;
1679		object = vm_object_allocate(OBJT_DEFAULT,
1680				atop(entry->end - entry->start));
1681		entry->object.vm_object = object;
1682		entry->offset = 0;
1683		if (entry->cred != NULL) {
1684			object->cred = entry->cred;
1685			object->charge = entry->end - entry->start;
1686			entry->cred = NULL;
1687		}
1688	} else if (entry->object.vm_object != NULL &&
1689		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1690		   entry->cred != NULL) {
1691		VM_OBJECT_WLOCK(entry->object.vm_object);
1692		KASSERT(entry->object.vm_object->cred == NULL,
1693		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
1694		entry->object.vm_object->cred = entry->cred;
1695		entry->object.vm_object->charge = entry->end - entry->start;
1696		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1697		entry->cred = NULL;
1698	}
1699
1700	new_entry = vm_map_entry_create(map);
1701	*new_entry = *entry;
1702
1703	new_entry->end = start;
1704	entry->offset += (start - entry->start);
1705	entry->start = start;
1706	if (new_entry->cred != NULL)
1707		crhold(entry->cred);
1708
1709	vm_map_entry_link(map, entry->prev, new_entry);
1710
1711	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1712		vm_object_reference(new_entry->object.vm_object);
1713		/*
1714		 * The object->un_pager.vnp.writemappings for the
1715		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
1716		 * kept as is here.  The virtual pages are
1717		 * re-distributed among the clipped entries, so the sum is
1718		 * left the same.
1719		 */
1720	}
1721}
1722
1723/*
1724 *	vm_map_clip_end:	[ internal use only ]
1725 *
1726 *	Asserts that the given entry ends at or before
1727 *	the specified address; if necessary,
1728 *	it splits the entry into two.
1729 */
1730#define vm_map_clip_end(map, entry, endaddr) \
1731{ \
1732	if ((endaddr) < (entry->end)) \
1733		_vm_map_clip_end((map), (entry), (endaddr)); \
1734}
1735
1736/*
1737 *	This routine is called only when it is known that
1738 *	the entry must be split.
1739 */
1740static void
1741_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
1742{
1743	vm_map_entry_t new_entry;
1744
1745	VM_MAP_ASSERT_LOCKED(map);
1746
1747	/*
1748	 * If there is no object backing this entry, we might as well create
1749	 * one now.  If we defer it, an object can get created after the map
1750	 * is clipped, and individual objects will be created for the split-up
1751	 * map.  This is a bit of a hack, but is also about the best place to
1752	 * put this improvement.
1753	 */
1754	if (entry->object.vm_object == NULL && !map->system_map) {
1755		vm_object_t object;
1756		object = vm_object_allocate(OBJT_DEFAULT,
1757				atop(entry->end - entry->start));
1758		entry->object.vm_object = object;
1759		entry->offset = 0;
1760		if (entry->cred != NULL) {
1761			object->cred = entry->cred;
1762			object->charge = entry->end - entry->start;
1763			entry->cred = NULL;
1764		}
1765	} else if (entry->object.vm_object != NULL &&
1766		   ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
1767		   entry->cred != NULL) {
1768		VM_OBJECT_WLOCK(entry->object.vm_object);
1769		KASSERT(entry->object.vm_object->cred == NULL,
1770		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
1771		entry->object.vm_object->cred = entry->cred;
1772		entry->object.vm_object->charge = entry->end - entry->start;
1773		VM_OBJECT_WUNLOCK(entry->object.vm_object);
1774		entry->cred = NULL;
1775	}
1776
1777	/*
1778	 * Create a new entry and insert it AFTER the specified entry
1779	 */
1780	new_entry = vm_map_entry_create(map);
1781	*new_entry = *entry;
1782
1783	new_entry->start = entry->end = end;
1784	new_entry->offset += (end - entry->start);
1785	if (new_entry->cred != NULL)
1786		crhold(entry->cred);
1787
1788	vm_map_entry_link(map, entry, new_entry);
1789
1790	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1791		vm_object_reference(new_entry->object.vm_object);
1792	}
1793}
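
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): range operations such as vm_map_protect() and
 *	vm_map_madvise() use the clip macros in a common pattern, first
 *	clipping the entry containing "start" and then clipping each
 *	entry against "end" while walking the range:
 *
 *		if (vm_map_lookup_entry(map, start, &entry))
 *			vm_map_clip_start(map, entry, start);
 *		else
 *			entry = entry->next;
 *		for (; entry != &map->header && entry->start < end;
 *		    entry = entry->next) {
 *			vm_map_clip_end(map, entry, end);
 *			... operate on [entry->start, entry->end) ...
 *		}
 */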
1794
1795/*
1796 *	vm_map_submap:		[ kernel use only ]
1797 *
1798 *	Mark the given range as handled by a subordinate map.
1799 *
1800 *	This range must have been created with vm_map_find,
1801 *	and no other operations may have been performed on this
1802 *	range prior to calling vm_map_submap.
1803 *
1804 *	Only a limited number of operations can be performed
1805 *	within this range after calling vm_map_submap:
1806 *		vm_fault
1807 *	[Don't try vm_map_copy!]
1808 *
1809 *	To remove a submapping, one must first remove the
1810 *	range from the superior map, and then destroy the
1811 *	submap (if desired).  [Better yet, don't try it.]
1812 */
1813int
1814vm_map_submap(
1815	vm_map_t map,
1816	vm_offset_t start,
1817	vm_offset_t end,
1818	vm_map_t submap)
1819{
1820	vm_map_entry_t entry;
1821	int result = KERN_INVALID_ARGUMENT;
1822
1823	vm_map_lock(map);
1824
1825	VM_MAP_RANGE_CHECK(map, start, end);
1826
1827	if (vm_map_lookup_entry(map, start, &entry)) {
1828		vm_map_clip_start(map, entry, start);
1829	} else
1830		entry = entry->next;
1831
1832	vm_map_clip_end(map, entry, end);
1833
1834	if ((entry->start == start) && (entry->end == end) &&
1835	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1836	    (entry->object.vm_object == NULL)) {
1837		entry->object.sub_map = submap;
1838		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1839		result = KERN_SUCCESS;
1840	}
1841	vm_map_unlock(map);
1842
1843	return (result);
1844}
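
/*
 *	Example usage (illustrative sketch, not part of the original
 *	code): kernel submaps are created by reserving a range in the
 *	parent map and then marking it as handled by a new map that
 *	shares the parent's pmap, which is roughly what kmem_suballoc()
 *	does:
 *
 *		submap = vm_map_create(vm_map_pmap(parent), min, max);
 *		... reserve [min, max) in the parent with vm_map_find() ...
 *		(void)vm_map_submap(parent, min, max, submap);
 */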
1845
1846/*
1847 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
1848 */
1849#define	MAX_INIT_PT	96
1850
1851/*
1852 *	vm_map_pmap_enter:
1853 *
1854 *	Preload the specified map's pmap with mappings to the specified
1855 *	object's memory-resident pages.  No further physical pages are
1856 *	allocated, and no further virtual pages are retrieved from secondary
1857 *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
1858 *	limited number of page mappings are created at the low-end of the
1859 *	specified address range.  (For this purpose, a superpage mapping
1860 *	counts as one page mapping.)  Otherwise, all resident pages within
1861 *	the specified address range are mapped.  Because these mappings are
1862 *	being created speculatively, cached pages are not reactivated and
1863 *	mapped.
1864 */
1865static void
1866vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
1867    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
1868{
1869	vm_offset_t start;
1870	vm_page_t p, p_start;
1871	vm_pindex_t mask, psize, threshold, tmpidx;
1872
1873	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
1874		return;
1875	VM_OBJECT_RLOCK(object);
1876	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1877		VM_OBJECT_RUNLOCK(object);
1878		VM_OBJECT_WLOCK(object);
1879		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
1880			pmap_object_init_pt(map->pmap, addr, object, pindex,
1881			    size);
1882			VM_OBJECT_WUNLOCK(object);
1883			return;
1884		}
1885		VM_OBJECT_LOCK_DOWNGRADE(object);
1886	}
1887
1888	psize = atop(size);
1889	if (psize + pindex > object->size) {
1890		if (object->size < pindex) {
1891			VM_OBJECT_RUNLOCK(object);
1892			return;
1893		}
1894		psize = object->size - pindex;
1895	}
1896
1897	start = 0;
1898	p_start = NULL;
1899	threshold = MAX_INIT_PT;
1900
1901	p = vm_page_find_least(object, pindex);
1902	/*
1903	 * Assert: the variable p is either (1) the page with the
1904	 * least pindex greater than or equal to the parameter pindex
1905	 * or (2) NULL.
1906	 */
1907	for (;
1908	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
1909	     p = TAILQ_NEXT(p, listq)) {
1910		/*
1911		 * Don't allow madvise to blow away our really free
1912		 * pages by allocating pv entries.
1913		 */
1914		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
1915		    vm_cnt.v_free_count < vm_cnt.v_free_reserved) ||
1916		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
1917		    tmpidx >= threshold)) {
1918			psize = tmpidx;
1919			break;
1920		}
1921		if (p->valid == VM_PAGE_BITS_ALL) {
1922			if (p_start == NULL) {
1923				start = addr + ptoa(tmpidx);
1924				p_start = p;
1925			}
1926			/* Jump ahead if a superpage mapping is possible. */
1927			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
1928			    (pagesizes[p->psind] - 1)) == 0) {
1929				mask = atop(pagesizes[p->psind]) - 1;
1930				if (tmpidx + mask < psize &&
1931				    vm_page_ps_is_valid(p)) {
1932					p += mask;
1933					threshold += mask;
1934				}
1935			}
1936		} else if (p_start != NULL) {
1937			pmap_enter_object(map->pmap, start, addr +
1938			    ptoa(tmpidx), p_start, prot);
1939			p_start = NULL;
1940		}
1941	}
1942	if (p_start != NULL)
1943		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
1944		    p_start, prot);
1945	VM_OBJECT_RUNLOCK(object);
1946}
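
/*
 * Worked numbers for the MAP_PREFAULT_PARTIAL threshold, assuming 4KB base
 * pages: the initial threshold of MAX_INIT_PT (96) page mappings limits the
 * prefault to roughly 384KB at the low end of the range.  When a resident
 * 2MB superpage is mapped, it counts as a single mapping, and the threshold
 * is advanced by the 511 additional base pages that it covers.
 */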
1947
1948/*
1949 *	vm_map_protect:
1950 *
1951 *	Sets the protection of the specified address
1952 *	region in the target map.  If "set_max" is
1953 *	specified, the maximum protection is to be set;
1954 *	otherwise, only the current protection is affected.
1955 */
1956int
1957vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
1958	       vm_prot_t new_prot, boolean_t set_max)
1959{
1960	vm_map_entry_t current, entry;
1961	vm_object_t obj;
1962	struct ucred *cred;
1963	vm_prot_t old_prot;
1964
1965	if (start == end)
1966		return (KERN_SUCCESS);
1967
1968	vm_map_lock(map);
1969
1970	VM_MAP_RANGE_CHECK(map, start, end);
1971
1972	if (vm_map_lookup_entry(map, start, &entry)) {
1973		vm_map_clip_start(map, entry, start);
1974	} else {
1975		entry = entry->next;
1976	}
1977
1978	/*
1979	 * Make a first pass to check for protection violations.
1980	 */
1981	current = entry;
1982	while ((current != &map->header) && (current->start < end)) {
1983		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1984			vm_map_unlock(map);
1985			return (KERN_INVALID_ARGUMENT);
1986		}
1987		if ((new_prot & current->max_protection) != new_prot) {
1988			vm_map_unlock(map);
1989			return (KERN_PROTECTION_FAILURE);
1990		}
1991		current = current->next;
1992	}
1993
1995	/*
1996	 * Do an accounting pass for private read-only mappings that
1997	 * now will do cow due to allowed write (e.g. debugger sets
1998	 * breakpoint on text segment)
1999	 */
2000	for (current = entry; (current != &map->header) &&
2001	     (current->start < end); current = current->next) {
2002
2003		vm_map_clip_end(map, current, end);
2004
2005		if (set_max ||
2006		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
2007		    ENTRY_CHARGED(current)) {
2008			continue;
2009		}
2010
2011		cred = curthread->td_ucred;
2012		obj = current->object.vm_object;
2013
2014		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
2015			if (!swap_reserve(current->end - current->start)) {
2016				vm_map_unlock(map);
2017				return (KERN_RESOURCE_SHORTAGE);
2018			}
2019			crhold(cred);
2020			current->cred = cred;
2021			continue;
2022		}
2023
2024		VM_OBJECT_WLOCK(obj);
2025		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2026			VM_OBJECT_WUNLOCK(obj);
2027			continue;
2028		}
2029
2030		/*
2031		 * Charge for the whole object allocation now, since
2032		 * we cannot distinguish between non-charged and
2033		 * charged clipped mappings of the same object later.
2034		 */
2035		KASSERT(obj->charge == 0,
2036		    ("vm_map_protect: object %p overcharged (entry %p)",
2037		    obj, current));
2038		if (!swap_reserve(ptoa(obj->size))) {
2039			VM_OBJECT_WUNLOCK(obj);
2040			vm_map_unlock(map);
2041			return (KERN_RESOURCE_SHORTAGE);
2042		}
2043
2044		crhold(cred);
2045		obj->cred = cred;
2046		obj->charge = ptoa(obj->size);
2047		VM_OBJECT_WUNLOCK(obj);
2048	}
2049
2050	/*
2051	 * Go back and fix up protections. [Note that clipping is not
2052	 * necessary the second time.]
2053	 */
2054	current = entry;
2055	while ((current != &map->header) && (current->start < end)) {
2056		old_prot = current->protection;
2057
2058		if (set_max)
2059			current->protection =
2060			    (current->max_protection = new_prot) &
2061			    old_prot;
2062		else
2063			current->protection = new_prot;
2064
2065		/*
2066		 * For user wired map entries, the normal lazy evaluation of
2067		 * write access upgrades through soft page faults is
2068		 * undesirable.  Instead, immediately copy any pages that are
2069		 * copy-on-write and enable write access in the physical map.
2070		 */
2071		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2072		    (current->protection & VM_PROT_WRITE) != 0 &&
2073		    (old_prot & VM_PROT_WRITE) == 0)
2074			vm_fault_copy_entry(map, map, current, current, NULL);
2075
2076		/*
2077		 * When restricting access, update the physical map.  Worry
2078		 * about copy-on-write here.
2079		 */
2080		if ((old_prot & ~current->protection) != 0) {
2081#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2082							VM_PROT_ALL)
2083			pmap_protect(map->pmap, current->start,
2084			    current->end,
2085			    current->protection & MASK(current));
2086#undef	MASK
2087		}
2088		vm_map_simplify_entry(map, current);
2089		current = current->next;
2090	}
2091	vm_map_unlock(map);
2092	return (KERN_SUCCESS);
2093}
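
/*
 * Illustrative sketch (comment only, not compiled): an mprotect(2)-style
 * caller changes only the current protection and leaves max_protection
 * alone; "map", "addr" and "len" are placeholder names.
 *
 *	rv = vm_map_protect(map, trunc_page(addr), round_page(addr + len),
 *	    VM_PROT_READ, FALSE);
 */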
2094
2095/*
2096 *	vm_map_madvise:
2097 *
2098 *	This routine traverses a process's map handling the madvise
2099 *	system call.  Advisories are classified as either those affecting
2100 *	the vm_map_entry structure, or those affecting the underlying
2101 *	objects.
2102 */
2103int
2104vm_map_madvise(
2105	vm_map_t map,
2106	vm_offset_t start,
2107	vm_offset_t end,
2108	int behav)
2109{
2110	vm_map_entry_t current, entry;
2111	int modify_map = 0;
2112
2113	/*
2114	 * Some madvise calls directly modify the vm_map_entry, in which case
2115	 * we need to use an exclusive lock on the map and we need to perform
2116	 * various clipping operations.  Otherwise we only need a read-lock
2117	 * on the map.
2118	 */
2119	switch (behav) {
2120	case MADV_NORMAL:
2121	case MADV_SEQUENTIAL:
2122	case MADV_RANDOM:
2123	case MADV_NOSYNC:
2124	case MADV_AUTOSYNC:
2125	case MADV_NOCORE:
2126	case MADV_CORE:
2127		if (start == end)
2128			return (KERN_SUCCESS);
2129		modify_map = 1;
2130		vm_map_lock(map);
2131		break;
2132	case MADV_WILLNEED:
2133	case MADV_DONTNEED:
2134	case MADV_FREE:
2135		if (start == end)
2136			return (KERN_SUCCESS);
2137		vm_map_lock_read(map);
2138		break;
2139	default:
2140		return (KERN_INVALID_ARGUMENT);
2141	}
2142
2143	/*
2144	 * Locate starting entry and clip if necessary.
2145	 */
2146	VM_MAP_RANGE_CHECK(map, start, end);
2147
2148	if (vm_map_lookup_entry(map, start, &entry)) {
2149		if (modify_map)
2150			vm_map_clip_start(map, entry, start);
2151	} else {
2152		entry = entry->next;
2153	}
2154
2155	if (modify_map) {
2156		/*
2157		 * madvise behaviors that are implemented in the vm_map_entry.
2158		 *
2159		 * We clip the vm_map_entry so that behavioral changes are
2160		 * limited to the specified address range.
2161		 */
2162		for (current = entry;
2163		     (current != &map->header) && (current->start < end);
2164		     current = current->next
2165		) {
2166			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2167				continue;
2168
2169			vm_map_clip_end(map, current, end);
2170
2171			switch (behav) {
2172			case MADV_NORMAL:
2173				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2174				break;
2175			case MADV_SEQUENTIAL:
2176				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2177				break;
2178			case MADV_RANDOM:
2179				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2180				break;
2181			case MADV_NOSYNC:
2182				current->eflags |= MAP_ENTRY_NOSYNC;
2183				break;
2184			case MADV_AUTOSYNC:
2185				current->eflags &= ~MAP_ENTRY_NOSYNC;
2186				break;
2187			case MADV_NOCORE:
2188				current->eflags |= MAP_ENTRY_NOCOREDUMP;
2189				break;
2190			case MADV_CORE:
2191				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2192				break;
2193			default:
2194				break;
2195			}
2196			vm_map_simplify_entry(map, current);
2197		}
2198		vm_map_unlock(map);
2199	} else {
2200		vm_pindex_t pstart, pend;
2201
2202		/*
2203		 * madvise behaviors that are implemented in the underlying
2204		 * vm_object.
2205		 *
2206		 * Since we don't clip the vm_map_entry, we have to clip
2207		 * the vm_object pindex and count.
2208		 */
2209		for (current = entry;
2210		     (current != &map->header) && (current->start < end);
2211		     current = current->next
2212		) {
2213			vm_offset_t useEnd, useStart;
2214
2215			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2216				continue;
2217
2218			pstart = OFF_TO_IDX(current->offset);
2219			pend = pstart + atop(current->end - current->start);
2220			useStart = current->start;
2221			useEnd = current->end;
2222
2223			if (current->start < start) {
2224				pstart += atop(start - current->start);
2225				useStart = start;
2226			}
2227			if (current->end > end) {
2228				pend -= atop(current->end - end);
2229				useEnd = end;
2230			}
2231
2232			if (pstart >= pend)
2233				continue;
2234
2235			/*
2236			 * Perform the pmap_advise() before clearing
2237			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
2238			 * concurrent pmap operation, such as pmap_remove(),
2239			 * could clear a reference in the pmap and set
2240			 * PGA_REFERENCED on the page before the pmap_advise()
2241			 * had completed.  Consequently, the page would appear
2242			 * referenced based upon an old reference that
2243			 * occurred before this pmap_advise() ran.
2244			 */
2245			if (behav == MADV_DONTNEED || behav == MADV_FREE)
2246				pmap_advise(map->pmap, useStart, useEnd,
2247				    behav);
2248
2249			vm_object_madvise(current->object.vm_object, pstart,
2250			    pend, behav);
2251
2252			/*
2253			 * Pre-populate paging structures in the
2254			 * WILLNEED case.  For wired entries, the
2255			 * paging structures are already populated.
2256			 */
2257			if (behav == MADV_WILLNEED &&
2258			    current->wired_count == 0) {
2259				vm_map_pmap_enter(map,
2260				    useStart,
2261				    current->protection,
2262				    current->object.vm_object,
2263				    pstart,
2264				    ptoa(pend - pstart),
2265				    MAP_PREFAULT_MADVISE
2266				);
2267			}
2268		}
2269		vm_map_unlock_read(map);
2270	}
2271	return (0);
2272}
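
/*
 * Illustrative sketch (comment only, not compiled): an madvise(2)-style
 * caller passes the behavior straight through; entry-level advice such as
 * MADV_NOSYNC takes the exclusive map lock above, while object-level advice
 * such as MADV_WILLNEED only read-locks the map.  "map", "addr" and "len"
 * are placeholder names.
 *
 *	(void)vm_map_madvise(map, trunc_page(addr), round_page(addr + len),
 *	    MADV_WILLNEED);
 */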
2273
2275/*
2276 *	vm_map_inherit:
2277 *
2278 *	Sets the inheritance of the specified address
2279 *	range in the target map.  Inheritance
2280 *	affects how the map will be shared with
2281 *	child maps at the time of vmspace_fork.
2282 */
2283int
2284vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2285	       vm_inherit_t new_inheritance)
2286{
2287	vm_map_entry_t entry;
2288	vm_map_entry_t temp_entry;
2289
2290	switch (new_inheritance) {
2291	case VM_INHERIT_NONE:
2292	case VM_INHERIT_COPY:
2293	case VM_INHERIT_SHARE:
2294		break;
2295	default:
2296		return (KERN_INVALID_ARGUMENT);
2297	}
2298	if (start == end)
2299		return (KERN_SUCCESS);
2300	vm_map_lock(map);
2301	VM_MAP_RANGE_CHECK(map, start, end);
2302	if (vm_map_lookup_entry(map, start, &temp_entry)) {
2303		entry = temp_entry;
2304		vm_map_clip_start(map, entry, start);
2305	} else
2306		entry = temp_entry->next;
2307	while ((entry != &map->header) && (entry->start < end)) {
2308		vm_map_clip_end(map, entry, end);
2309		entry->inheritance = new_inheritance;
2310		vm_map_simplify_entry(map, entry);
2311		entry = entry->next;
2312	}
2313	vm_map_unlock(map);
2314	return (KERN_SUCCESS);
2315}
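
/*
 * Illustrative sketch (comment only, not compiled): a minherit(2)-style
 * caller marks a range so that a later vmspace_fork() shares it with the
 * child instead of copying it; "map", "addr" and "len" are placeholders.
 *
 *	rv = vm_map_inherit(map, trunc_page(addr), round_page(addr + len),
 *	    VM_INHERIT_SHARE);
 */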
2316
2317/*
2318 *	vm_map_unwire:
2319 *
2320 *	Implements both kernel and user unwiring.
2321 */
2322int
2323vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2324    int flags)
2325{
2326	vm_map_entry_t entry, first_entry, tmp_entry;
2327	vm_offset_t saved_start;
2328	unsigned int last_timestamp;
2329	int rv;
2330	boolean_t need_wakeup, result, user_unwire;
2331
2332	if (start == end)
2333		return (KERN_SUCCESS);
2334	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2335	vm_map_lock(map);
2336	VM_MAP_RANGE_CHECK(map, start, end);
2337	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2338		if (flags & VM_MAP_WIRE_HOLESOK)
2339			first_entry = first_entry->next;
2340		else {
2341			vm_map_unlock(map);
2342			return (KERN_INVALID_ADDRESS);
2343		}
2344	}
2345	last_timestamp = map->timestamp;
2346	entry = first_entry;
2347	while (entry != &map->header && entry->start < end) {
2348		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2349			/*
2350			 * We have not yet clipped the entry.
2351			 */
2352			saved_start = (start >= entry->start) ? start :
2353			    entry->start;
2354			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2355			if (vm_map_unlock_and_wait(map, 0)) {
2356				/*
2357				 * Allow interruption of user unwiring?
2358				 */
2359			}
2360			vm_map_lock(map);
2361			if (last_timestamp+1 != map->timestamp) {
2362				/*
2363				 * Look again for the entry because the map was
2364				 * modified while it was unlocked.
2365				 * Specifically, the entry may have been
2366				 * clipped, merged, or deleted.
2367				 */
2368				if (!vm_map_lookup_entry(map, saved_start,
2369				    &tmp_entry)) {
2370					if (flags & VM_MAP_WIRE_HOLESOK)
2371						tmp_entry = tmp_entry->next;
2372					else {
2373						if (saved_start == start) {
2374							/*
2375							 * first_entry has been deleted.
2376							 */
2377							vm_map_unlock(map);
2378							return (KERN_INVALID_ADDRESS);
2379						}
2380						end = saved_start;
2381						rv = KERN_INVALID_ADDRESS;
2382						goto done;
2383					}
2384				}
2385				if (entry == first_entry)
2386					first_entry = tmp_entry;
2387				else
2388					first_entry = NULL;
2389				entry = tmp_entry;
2390			}
2391			last_timestamp = map->timestamp;
2392			continue;
2393		}
2394		vm_map_clip_start(map, entry, start);
2395		vm_map_clip_end(map, entry, end);
2396		/*
2397		 * Mark the entry in case the map lock is released.  (See
2398		 * above.)
2399		 */
2400		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2401		    entry->wiring_thread == NULL,
2402		    ("owned map entry %p", entry));
2403		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2404		entry->wiring_thread = curthread;
2405		/*
2406		 * Check the map for holes in the specified region.
2407		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2408		 */
2409		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2410		    (entry->end < end && (entry->next == &map->header ||
2411		    entry->next->start > entry->end))) {
2412			end = entry->end;
2413			rv = KERN_INVALID_ADDRESS;
2414			goto done;
2415		}
2416		/*
2417		 * If system unwiring, require that the entry is system wired.
2418		 */
2419		if (!user_unwire &&
2420		    vm_map_entry_system_wired_count(entry) == 0) {
2421			end = entry->end;
2422			rv = KERN_INVALID_ARGUMENT;
2423			goto done;
2424		}
2425		entry = entry->next;
2426	}
2427	rv = KERN_SUCCESS;
2428done:
2429	need_wakeup = FALSE;
2430	if (first_entry == NULL) {
2431		result = vm_map_lookup_entry(map, start, &first_entry);
2432		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2433			first_entry = first_entry->next;
2434		else
2435			KASSERT(result, ("vm_map_unwire: lookup failed"));
2436	}
2437	for (entry = first_entry; entry != &map->header && entry->start < end;
2438	    entry = entry->next) {
2439		/*
2440		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2441		 * space in the unwired region could have been mapped
2442		 * while the map lock was dropped for draining
2443		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
2444		 * could be simultaneously wiring this new mapping
2445		 * entry.  Detect these cases and skip any entries
2446		 * marked as in transition by us.
2447		 */
2448		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2449		    entry->wiring_thread != curthread) {
2450			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2451			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
2452			continue;
2453		}
2454
2455		if (rv == KERN_SUCCESS && (!user_unwire ||
2456		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2457			if (user_unwire)
2458				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2459			if (entry->wired_count == 1)
2460				vm_map_entry_unwire(map, entry);
2461			else
2462				entry->wired_count--;
2463		}
2464		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2465		    ("vm_map_unwire: in-transition flag missing %p", entry));
2466		KASSERT(entry->wiring_thread == curthread,
2467		    ("vm_map_unwire: alien wire %p", entry));
2468		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
2469		entry->wiring_thread = NULL;
2470		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2471			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2472			need_wakeup = TRUE;
2473		}
2474		vm_map_simplify_entry(map, entry);
2475	}
2476	vm_map_unlock(map);
2477	if (need_wakeup)
2478		vm_map_wakeup(map);
2479	return (rv);
2480}
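
/*
 * Illustrative sketch (comment only, not compiled): a munlock(2)-style
 * caller drops a user wiring that was established earlier through
 * vm_map_wire(); "map", "addr" and "len" are placeholders.
 *
 *	rv = vm_map_unwire(map, trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */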
2481
2482/*
2483 *	vm_map_wire_entry_failure:
2484 *
2485 *	Handle a wiring failure on the given entry.
2486 *
2487 *	The map should be locked.
2488 */
2489static void
2490vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
2491    vm_offset_t failed_addr)
2492{
2493
2494	VM_MAP_ASSERT_LOCKED(map);
2495	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
2496	    entry->wired_count == 1,
2497	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
2498	KASSERT(failed_addr < entry->end,
2499	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
2500
2501	/*
2502	 * If any pages at the start of this entry were successfully wired,
2503	 * then unwire them.
2504	 */
2505	if (failed_addr > entry->start) {
2506		pmap_unwire(map->pmap, entry->start, failed_addr);
2507		vm_object_unwire(entry->object.vm_object, entry->offset,
2508		    failed_addr - entry->start, PQ_ACTIVE);
2509	}
2510
2511	/*
2512	 * Assign an out-of-range value to represent the failure to wire this
2513	 * entry.
2514	 */
2515	entry->wired_count = -1;
2516}
2517
2518/*
2519 *	vm_map_wire:
2520 *
2521 *	Implements both kernel and user wiring.
2522 */
2523int
2524vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2525    int flags)
2526{
2527	vm_map_entry_t entry, first_entry, tmp_entry;
2528	vm_offset_t faddr, saved_end, saved_start;
2529	unsigned int last_timestamp;
2530	int rv;
2531	boolean_t need_wakeup, result, user_wire;
2532	vm_prot_t prot;
2533
2534	if (start == end)
2535		return (KERN_SUCCESS);
2536	prot = 0;
2537	if (flags & VM_MAP_WIRE_WRITE)
2538		prot |= VM_PROT_WRITE;
2539	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2540	vm_map_lock(map);
2541	VM_MAP_RANGE_CHECK(map, start, end);
2542	if (!vm_map_lookup_entry(map, start, &first_entry)) {
2543		if (flags & VM_MAP_WIRE_HOLESOK)
2544			first_entry = first_entry->next;
2545		else {
2546			vm_map_unlock(map);
2547			return (KERN_INVALID_ADDRESS);
2548		}
2549	}
2550	last_timestamp = map->timestamp;
2551	entry = first_entry;
2552	while (entry != &map->header && entry->start < end) {
2553		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2554			/*
2555			 * We have not yet clipped the entry.
2556			 */
2557			saved_start = (start >= entry->start) ? start :
2558			    entry->start;
2559			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2560			if (vm_map_unlock_and_wait(map, 0)) {
2561				/*
2562				 * Allow interruption of user wiring?
2563				 */
2564			}
2565			vm_map_lock(map);
2566			if (last_timestamp + 1 != map->timestamp) {
2567				/*
2568				 * Look again for the entry because the map was
2569				 * modified while it was unlocked.
2570				 * Specifically, the entry may have been
2571				 * clipped, merged, or deleted.
2572				 */
2573				if (!vm_map_lookup_entry(map, saved_start,
2574				    &tmp_entry)) {
2575					if (flags & VM_MAP_WIRE_HOLESOK)
2576						tmp_entry = tmp_entry->next;
2577					else {
2578						if (saved_start == start) {
2579							/*
2580							 * first_entry has been deleted.
2581							 */
2582							vm_map_unlock(map);
2583							return (KERN_INVALID_ADDRESS);
2584						}
2585						end = saved_start;
2586						rv = KERN_INVALID_ADDRESS;
2587						goto done;
2588					}
2589				}
2590				if (entry == first_entry)
2591					first_entry = tmp_entry;
2592				else
2593					first_entry = NULL;
2594				entry = tmp_entry;
2595			}
2596			last_timestamp = map->timestamp;
2597			continue;
2598		}
2599		vm_map_clip_start(map, entry, start);
2600		vm_map_clip_end(map, entry, end);
2601		/*
2602		 * Mark the entry in case the map lock is released.  (See
2603		 * above.)
2604		 */
2605		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2606		    entry->wiring_thread == NULL,
2607		    ("owned map entry %p", entry));
2608		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2609		entry->wiring_thread = curthread;
2610		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
2611		    || (entry->protection & prot) != prot) {
2612			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
2613			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
2614				end = entry->end;
2615				rv = KERN_INVALID_ADDRESS;
2616				goto done;
2617			}
2618			goto next_entry;
2619		}
2620		if (entry->wired_count == 0) {
2621			entry->wired_count++;
2622			saved_start = entry->start;
2623			saved_end = entry->end;
2624
2625			/*
2626			 * Release the map lock, relying on the in-transition
2627			 * mark.  Mark the map busy for fork.
2628			 */
2629			vm_map_busy(map);
2630			vm_map_unlock(map);
2631
2632			faddr = saved_start;
2633			do {
2634				/*
2635				 * Simulate a fault to get the page and enter
2636				 * it into the physical map.
2637				 */
2638				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
2639				    VM_FAULT_WIRE)) != KERN_SUCCESS)
2640					break;
2641			} while ((faddr += PAGE_SIZE) < saved_end);
2642			vm_map_lock(map);
2643			vm_map_unbusy(map);
2644			if (last_timestamp + 1 != map->timestamp) {
2645				/*
2646				 * Look again for the entry because the map was
2647				 * modified while it was unlocked.  The entry
2648				 * may have been clipped, but NOT merged or
2649				 * deleted.
2650				 */
2651				result = vm_map_lookup_entry(map, saved_start,
2652				    &tmp_entry);
2653				KASSERT(result, ("vm_map_wire: lookup failed"));
2654				if (entry == first_entry)
2655					first_entry = tmp_entry;
2656				else
2657					first_entry = NULL;
2658				entry = tmp_entry;
2659				while (entry->end < saved_end) {
2660					/*
2661					 * In case of failure, handle entries
2662					 * that were not fully wired here;
2663					 * fully wired entries are handled
2664					 * later.
2665					 */
2666					if (rv != KERN_SUCCESS &&
2667					    faddr < entry->end)
2668						vm_map_wire_entry_failure(map,
2669						    entry, faddr);
2670					entry = entry->next;
2671				}
2672			}
2673			last_timestamp = map->timestamp;
2674			if (rv != KERN_SUCCESS) {
2675				vm_map_wire_entry_failure(map, entry, faddr);
2676				end = entry->end;
2677				goto done;
2678			}
2679		} else if (!user_wire ||
2680			   (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2681			entry->wired_count++;
2682		}
2683		/*
2684		 * Check the map for holes in the specified region.
2685		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2686		 */
2687	next_entry:
2688		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2689		    (entry->end < end && (entry->next == &map->header ||
2690		    entry->next->start > entry->end))) {
2691			end = entry->end;
2692			rv = KERN_INVALID_ADDRESS;
2693			goto done;
2694		}
2695		entry = entry->next;
2696	}
2697	rv = KERN_SUCCESS;
2698done:
2699	need_wakeup = FALSE;
2700	if (first_entry == NULL) {
2701		result = vm_map_lookup_entry(map, start, &first_entry);
2702		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2703			first_entry = first_entry->next;
2704		else
2705			KASSERT(result, ("vm_map_wire: lookup failed"));
2706	}
2707	for (entry = first_entry; entry != &map->header && entry->start < end;
2708	    entry = entry->next) {
2709		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
2710			goto next_entry_done;
2711
2712		/*
2713		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2714		 * space in the unwired region could have been mapped
2715		 * while the map lock was dropped for faulting in the
2716		 * pages or draining MAP_ENTRY_IN_TRANSITION.
2717		 * Moreover, another thread could be simultaneously
2718		 * wiring this new mapping entry.  Detect these cases
2719		 * and skip any entries marked as in transition by us.
2720		 */
2721		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2722		    entry->wiring_thread != curthread) {
2723			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2724			    ("vm_map_wire: !HOLESOK and new/changed entry"));
2725			continue;
2726		}
2727
2728		if (rv == KERN_SUCCESS) {
2729			if (user_wire)
2730				entry->eflags |= MAP_ENTRY_USER_WIRED;
2731		} else if (entry->wired_count == -1) {
2732			/*
2733			 * Wiring failed on this entry.  Thus, unwiring is
2734			 * unnecessary.
2735			 */
2736			entry->wired_count = 0;
2737		} else if (!user_wire ||
2738		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2739			/*
2740			 * Undo the wiring.  Wiring succeeded on this entry
2741			 * but failed on a later entry.
2742			 */
2743			if (entry->wired_count == 1)
2744				vm_map_entry_unwire(map, entry);
2745			else
2746				entry->wired_count--;
2747		}
2748	next_entry_done:
2749		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2750		    ("vm_map_wire: in-transition flag missing %p", entry));
2751		KASSERT(entry->wiring_thread == curthread,
2752		    ("vm_map_wire: alien wire %p", entry));
2753		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
2754		    MAP_ENTRY_WIRE_SKIPPED);
2755		entry->wiring_thread = NULL;
2756		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
2757			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
2758			need_wakeup = TRUE;
2759		}
2760		vm_map_simplify_entry(map, entry);
2761	}
2762	vm_map_unlock(map);
2763	if (need_wakeup)
2764		vm_map_wakeup(map);
2765	return (rv);
2766}
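
/*
 * Illustrative sketch (comment only, not compiled): an mlock(2)-style
 * caller faults in and wires every page of a user range; passing
 * VM_MAP_WIRE_HOLESOK instead of VM_MAP_WIRE_NOHOLES would tolerate
 * unmapped gaps within the range.  "map", "addr" and "len" are
 * placeholders.
 *
 *	rv = vm_map_wire(map, trunc_page(addr), round_page(addr + len),
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */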
2767
2768/*
2769 * vm_map_sync
2770 *
2771 * Push any dirty cached pages in the address range to their pager.
2772 * If syncio is TRUE, dirty pages are written synchronously.
2773 * If invalidate is TRUE, any cached pages are freed as well.
2774 *
2775 * If the size of the region from start to end is zero, we are
2776 * supposed to flush all modified pages within the region containing
2777 * start.  Unfortunately, a region can be split or coalesced with
2778 * neighboring regions, making it difficult to determine what the
2779 * original region was.  Therefore, we approximate this requirement by
2780 * flushing the current region containing start.
2781 *
2782 * Returns an error if any part of the specified range is not mapped.
2783 */
2784int
2785vm_map_sync(
2786	vm_map_t map,
2787	vm_offset_t start,
2788	vm_offset_t end,
2789	boolean_t syncio,
2790	boolean_t invalidate)
2791{
2792	vm_map_entry_t current;
2793	vm_map_entry_t entry;
2794	vm_size_t size;
2795	vm_object_t object;
2796	vm_ooffset_t offset;
2797	unsigned int last_timestamp;
2798	boolean_t failed;
2799
2800	vm_map_lock_read(map);
2801	VM_MAP_RANGE_CHECK(map, start, end);
2802	if (!vm_map_lookup_entry(map, start, &entry)) {
2803		vm_map_unlock_read(map);
2804		return (KERN_INVALID_ADDRESS);
2805	} else if (start == end) {
2806		start = entry->start;
2807		end = entry->end;
2808	}
2809	/*
2810	 * Make a first pass to check for user-wired memory and holes.
2811	 */
2812	for (current = entry; current != &map->header && current->start < end;
2813	    current = current->next) {
2814		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
2815			vm_map_unlock_read(map);
2816			return (KERN_INVALID_ARGUMENT);
2817		}
2818		if (end > current->end &&
2819		    (current->next == &map->header ||
2820			current->end != current->next->start)) {
2821			vm_map_unlock_read(map);
2822			return (KERN_INVALID_ADDRESS);
2823		}
2824	}
2825
2826	if (invalidate)
2827		pmap_remove(map->pmap, start, end);
2828	failed = FALSE;
2829
2830	/*
2831	 * Make a second pass, cleaning/uncaching pages from the indicated
2832	 * objects as we go.
2833	 */
2834	for (current = entry; current != &map->header && current->start < end;) {
2835		offset = current->offset + (start - current->start);
2836		size = (end <= current->end ? end : current->end) - start;
2837		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2838			vm_map_t smap;
2839			vm_map_entry_t tentry;
2840			vm_size_t tsize;
2841
2842			smap = current->object.sub_map;
2843			vm_map_lock_read(smap);
2844			(void) vm_map_lookup_entry(smap, offset, &tentry);
2845			tsize = tentry->end - offset;
2846			if (tsize < size)
2847				size = tsize;
2848			object = tentry->object.vm_object;
2849			offset = tentry->offset + (offset - tentry->start);
2850			vm_map_unlock_read(smap);
2851		} else {
2852			object = current->object.vm_object;
2853		}
2854		vm_object_reference(object);
2855		last_timestamp = map->timestamp;
2856		vm_map_unlock_read(map);
2857		if (!vm_object_sync(object, offset, size, syncio, invalidate))
2858			failed = TRUE;
2859		start += size;
2860		vm_object_deallocate(object);
2861		vm_map_lock_read(map);
2862		if (last_timestamp == map->timestamp ||
2863		    !vm_map_lookup_entry(map, start, &current))
2864			current = current->next;
2865	}
2866
2867	vm_map_unlock_read(map);
2868	return (failed ? KERN_FAILURE : KERN_SUCCESS);
2869}
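
/*
 * Illustrative sketch (comment only, not compiled): an msync(2)-style
 * caller maps MS_SYNC and MS_INVALIDATE onto the syncio and invalidate
 * arguments; "map", "addr", "len" and "flags" are placeholders.
 *
 *	rv = vm_map_sync(map, trunc_page(addr), round_page(addr + len),
 *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
 */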
2870
2871/*
2872 *	vm_map_entry_unwire:	[ internal use only ]
2873 *
2874 *	Make the region specified by this entry pageable.
2875 *
2876 *	The map in question should be locked.
2877 *	[This is the reason for this routine's existence.]
2878 */
2879static void
2880vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2881{
2882
2883	VM_MAP_ASSERT_LOCKED(map);
2884	KASSERT(entry->wired_count > 0,
2885	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
2886	pmap_unwire(map->pmap, entry->start, entry->end);
2887	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
2888	    entry->start, PQ_ACTIVE);
2889	entry->wired_count = 0;
2890}
2891
2892static void
2893vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
2894{
2895
2896	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
2897		vm_object_deallocate(entry->object.vm_object);
2898	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
2899}
2900
2901/*
2902 *	vm_map_entry_delete:	[ internal use only ]
2903 *
2904 *	Deallocate the given entry from the target map.
2905 */
2906static void
2907vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
2908{
2909	vm_object_t object;
2910	vm_pindex_t offidxstart, offidxend, count, size1;
2911	vm_ooffset_t size;
2912
2913	vm_map_entry_unlink(map, entry);
2914	object = entry->object.vm_object;
2915	size = entry->end - entry->start;
2916	map->size -= size;
2917
2918	if (entry->cred != NULL) {
2919		swap_release_by_cred(size, entry->cred);
2920		crfree(entry->cred);
2921	}
2922
2923	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2924	    (object != NULL)) {
2925		KASSERT(entry->cred == NULL || object->cred == NULL ||
2926		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
2927		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
2928		count = OFF_TO_IDX(size);
2929		offidxstart = OFF_TO_IDX(entry->offset);
2930		offidxend = offidxstart + count;
2931		VM_OBJECT_WLOCK(object);
2932		if (object->ref_count != 1 &&
2933		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
2934		    object == kernel_object || object == kmem_object)) {
2935			vm_object_collapse(object);
2936
2937			/*
2938			 * The option OBJPR_NOTMAPPED can be passed here
2939			 * because vm_map_delete() already performed
2940			 * pmap_remove() on the only mapping to this range
2941			 * of pages.
2942			 */
2943			vm_object_page_remove(object, offidxstart, offidxend,
2944			    OBJPR_NOTMAPPED);
2945			if (object->type == OBJT_SWAP)
2946				swap_pager_freespace(object, offidxstart, count);
2947			if (offidxend >= object->size &&
2948			    offidxstart < object->size) {
2949				size1 = object->size;
2950				object->size = offidxstart;
2951				if (object->cred != NULL) {
2952					size1 -= object->size;
2953					KASSERT(object->charge >= ptoa(size1),
2954					    ("vm_map_entry_delete: object->charge < 0"));
2955					swap_release_by_cred(ptoa(size1), object->cred);
2956					object->charge -= ptoa(size1);
2957				}
2958			}
2959		}
2960		VM_OBJECT_WUNLOCK(object);
2961	} else
2962		entry->object.vm_object = NULL;
2963	if (map->system_map)
2964		vm_map_entry_deallocate(entry, TRUE);
2965	else {
2966		entry->next = curthread->td_map_def_user;
2967		curthread->td_map_def_user = entry;
2968	}
2969}
2970
2971/*
2972 *	vm_map_delete:	[ internal use only ]
2973 *
2974 *	Deallocates the given address range from the target
2975 *	map.
2976 */
2977int
2978vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
2979{
2980	vm_map_entry_t entry;
2981	vm_map_entry_t first_entry;
2982
2983	VM_MAP_ASSERT_LOCKED(map);
2984	if (start == end)
2985		return (KERN_SUCCESS);
2986
2987	/*
2988	 * Find the start of the region, and clip it
2989	 */
2990	if (!vm_map_lookup_entry(map, start, &first_entry))
2991		entry = first_entry->next;
2992	else {
2993		entry = first_entry;
2994		vm_map_clip_start(map, entry, start);
2995	}
2996
2997	/*
2998	 * Step through all entries in this region
2999	 */
3000	while ((entry != &map->header) && (entry->start < end)) {
3001		vm_map_entry_t next;
3002
3003		/*
3004		 * Wait for wiring or unwiring of an entry to complete.
3005		 * Also wait for any system wirings to disappear on
3006		 * user maps.
3007		 */
3008		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3009		    (vm_map_pmap(map) != kernel_pmap &&
3010		    vm_map_entry_system_wired_count(entry) != 0)) {
3011			unsigned int last_timestamp;
3012			vm_offset_t saved_start;
3013			vm_map_entry_t tmp_entry;
3014
3015			saved_start = entry->start;
3016			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3017			last_timestamp = map->timestamp;
3018			(void) vm_map_unlock_and_wait(map, 0);
3019			vm_map_lock(map);
3020			if (last_timestamp + 1 != map->timestamp) {
3021				/*
3022				 * Look again for the entry because the map was
3023				 * modified while it was unlocked.
3024				 * Specifically, the entry may have been
3025				 * clipped, merged, or deleted.
3026				 */
3027				if (!vm_map_lookup_entry(map, saved_start,
3028							 &tmp_entry))
3029					entry = tmp_entry->next;
3030				else {
3031					entry = tmp_entry;
3032					vm_map_clip_start(map, entry,
3033							  saved_start);
3034				}
3035			}
3036			continue;
3037		}
3038		vm_map_clip_end(map, entry, end);
3039
3040		next = entry->next;
3041
3042		/*
3043		 * Unwire before removing addresses from the pmap; otherwise,
3044		 * unwiring will put the entries back in the pmap.
3045		 */
3046		if (entry->wired_count != 0) {
3047			vm_map_entry_unwire(map, entry);
3048		}
3049
3050		pmap_remove(map->pmap, entry->start, entry->end);
3051
3052		/*
3053		 * Delete the entry only after removing all pmap
3054		 * entries pointing to its pages.  (Otherwise, its
3055		 * page frames may be reallocated, and any modify bits
3056		 * will be set in the wrong object!)
3057		 */
3058		vm_map_entry_delete(map, entry);
3059		entry = next;
3060	}
3061	return (KERN_SUCCESS);
3062}
3063
3064/*
3065 *	vm_map_remove:
3066 *
3067 *	Remove the given address range from the target map.
3068 *	This is the exported form of vm_map_delete.
3069 */
3070int
3071vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3072{
3073	int result;
3074
3075	vm_map_lock(map);
3076	VM_MAP_RANGE_CHECK(map, start, end);
3077	result = vm_map_delete(map, start, end);
3078	vm_map_unlock(map);
3079	return (result);
3080}
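
/*
 * Illustrative sketch (comment only, not compiled): tearing down a kernel
 * virtual address range that was previously populated in kernel_map;
 * "addr" and "size" are placeholders.
 *
 *	(void)vm_map_remove(kernel_map, addr, addr + size);
 */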
3081
3082/*
3083 *	vm_map_check_protection:
3084 *
3085 *	Assert that the target map allows the specified privilege on the
3086 *	entire address region given.  The entire region must be allocated.
3087 *
3088 *	WARNING!  This code does not and should not check whether the
3089 *	contents of the region are accessible.  For example, a smaller file
3090 *	might be mapped into a larger address space.
3091 *
3092 *	NOTE!  This code is also called by munmap().
3093 *
3094 *	The map must be locked.  A read lock is sufficient.
3095 */
3096boolean_t
3097vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3098			vm_prot_t protection)
3099{
3100	vm_map_entry_t entry;
3101	vm_map_entry_t tmp_entry;
3102
3103	if (!vm_map_lookup_entry(map, start, &tmp_entry))
3104		return (FALSE);
3105	entry = tmp_entry;
3106
3107	while (start < end) {
3108		if (entry == &map->header)
3109			return (FALSE);
3110		/*
3111		 * No holes allowed!
3112		 */
3113		if (start < entry->start)
3114			return (FALSE);
3115		/*
3116		 * Check protection associated with entry.
3117		 */
3118		if ((entry->protection & protection) != protection)
3119			return (FALSE);
3120		/* go to next entry */
3121		start = entry->end;
3122		entry = entry->next;
3123	}
3124	return (TRUE);
3125}
3126
3127/*
3128 *	vm_map_copy_entry:
3129 *
3130 *	Copies the contents of the source entry to the destination
3131 *	entry.  The entries *must* be aligned properly.
3132 */
3133static void
3134vm_map_copy_entry(
3135	vm_map_t src_map,
3136	vm_map_t dst_map,
3137	vm_map_entry_t src_entry,
3138	vm_map_entry_t dst_entry,
3139	vm_ooffset_t *fork_charge)
3140{
3141	vm_object_t src_object;
3142	vm_map_entry_t fake_entry;
3143	vm_offset_t size;
3144	struct ucred *cred;
3145	int charged;
3146
3147	VM_MAP_ASSERT_LOCKED(dst_map);
3148
3149	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3150		return;
3151
3152	if (src_entry->wired_count == 0 ||
3153	    (src_entry->protection & VM_PROT_WRITE) == 0) {
3154		/*
3155		 * If the source entry is marked needs_copy, it is already
3156		 * write-protected.
3157		 */
3158		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3159		    (src_entry->protection & VM_PROT_WRITE) != 0) {
3160			pmap_protect(src_map->pmap,
3161			    src_entry->start,
3162			    src_entry->end,
3163			    src_entry->protection & ~VM_PROT_WRITE);
3164		}
3165
3166		/*
3167		 * Make a copy of the object.
3168		 */
3169		size = src_entry->end - src_entry->start;
3170		if ((src_object = src_entry->object.vm_object) != NULL) {
3171			VM_OBJECT_WLOCK(src_object);
3172			charged = ENTRY_CHARGED(src_entry);
3173			if ((src_object->handle == NULL) &&
3174				(src_object->type == OBJT_DEFAULT ||
3175				 src_object->type == OBJT_SWAP)) {
3176				vm_object_collapse(src_object);
3177				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3178					vm_object_split(src_entry);
3179					src_object = src_entry->object.vm_object;
3180				}
3181			}
3182			vm_object_reference_locked(src_object);
3183			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3184			if (src_entry->cred != NULL &&
3185			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3186				KASSERT(src_object->cred == NULL,
3187				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3188				     src_object));
3189				src_object->cred = src_entry->cred;
3190				src_object->charge = size;
3191			}
3192			VM_OBJECT_WUNLOCK(src_object);
3193			dst_entry->object.vm_object = src_object;
3194			if (charged) {
3195				cred = curthread->td_ucred;
3196				crhold(cred);
3197				dst_entry->cred = cred;
3198				*fork_charge += size;
3199				if (!(src_entry->eflags &
3200				      MAP_ENTRY_NEEDS_COPY)) {
3201					crhold(cred);
3202					src_entry->cred = cred;
3203					*fork_charge += size;
3204				}
3205			}
3206			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3207			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
3208			dst_entry->offset = src_entry->offset;
3209			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3210				/*
3211				 * MAP_ENTRY_VN_WRITECNT cannot
3212				 * indicate write reference from
3213				 * src_entry, since the entry is
3214				 * marked as needs copy.  Allocate a
3215				 * fake entry that is used to
3216				 * decrement object->un_pager.vnp.writecount
3217				 * at the appropriate time.  Attach
3218				 * fake_entry to the deferred list.
3219				 */
3220				fake_entry = vm_map_entry_create(dst_map);
3221				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3222				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3223				vm_object_reference(src_object);
3224				fake_entry->object.vm_object = src_object;
3225				fake_entry->start = src_entry->start;
3226				fake_entry->end = src_entry->end;
3227				fake_entry->next = curthread->td_map_def_user;
3228				curthread->td_map_def_user = fake_entry;
3229			}
3230		} else {
3231			dst_entry->object.vm_object = NULL;
3232			dst_entry->offset = 0;
3233			if (src_entry->cred != NULL) {
3234				dst_entry->cred = curthread->td_ucred;
3235				crhold(dst_entry->cred);
3236				*fork_charge += size;
3237			}
3238		}
3239
3240		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3241		    dst_entry->end - dst_entry->start, src_entry->start);
3242	} else {
3243		/*
3244		 * We don't want to make writeable wired pages copy-on-write.
3245		 * Immediately copy these pages into the new map by simulating
3246		 * page faults.  The new pages are pageable.
3247		 */
3248		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3249		    fork_charge);
3250	}
3251}
3252
3253/*
3254 * vmspace_map_entry_forked:
3255 * Update the newly-forked vmspace each time a map entry is inherited
3256 * or copied.  The values for vm_dsize and vm_tsize are approximate
3257 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3258 */
3259static void
3260vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3261    vm_map_entry_t entry)
3262{
3263	vm_size_t entrysize;
3264	vm_offset_t newend;
3265
3266	entrysize = entry->end - entry->start;
3267	vm2->vm_map.size += entrysize;
3268	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3269		vm2->vm_ssize += btoc(entrysize);
3270	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3271	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3272		newend = MIN(entry->end,
3273		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3274		vm2->vm_dsize += btoc(newend - entry->start);
3275	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3276	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3277		newend = MIN(entry->end,
3278		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3279		vm2->vm_tsize += btoc(newend - entry->start);
3280	}
3281}
3282
3283/*
3284 * vmspace_fork:
3285 * Create a new process vmspace structure and vm_map
3286 * based on those of an existing process.  The new map
3287 * is based on the old map, according to the inheritance
3288 * values on the regions in that map.
3289 *
3290 * XXX It might be worth coalescing the entries added to the new vmspace.
3291 *
3292 * The source map must not be locked.
3293 */
3294struct vmspace *
3295vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3296{
3297	struct vmspace *vm2;
3298	vm_map_t new_map, old_map;
3299	vm_map_entry_t new_entry, old_entry;
3300	vm_object_t object;
3301	int locked;
3302
3303	old_map = &vm1->vm_map;
3304	/* Copy immutable fields of vm1 to vm2. */
3305	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
3306	if (vm2 == NULL)
3307		return (NULL);
3308	vm2->vm_taddr = vm1->vm_taddr;
3309	vm2->vm_daddr = vm1->vm_daddr;
3310	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3311	vm_map_lock(old_map);
3312	if (old_map->busy)
3313		vm_map_wait_busy(old_map);
3314	new_map = &vm2->vm_map;
3315	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3316	KASSERT(locked, ("vmspace_fork: lock failed"));
3317
3318	old_entry = old_map->header.next;
3319
3320	while (old_entry != &old_map->header) {
3321		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3322			panic("vm_map_fork: encountered a submap");
3323
3324		switch (old_entry->inheritance) {
3325		case VM_INHERIT_NONE:
3326			break;
3327
3328		case VM_INHERIT_SHARE:
3329			/*
3330			 * Clone the entry, creating the shared object if necessary.
3331			 */
3332			object = old_entry->object.vm_object;
3333			if (object == NULL) {
3334				object = vm_object_allocate(OBJT_DEFAULT,
3335					atop(old_entry->end - old_entry->start));
3336				old_entry->object.vm_object = object;
3337				old_entry->offset = 0;
3338				if (old_entry->cred != NULL) {
3339					object->cred = old_entry->cred;
3340					object->charge = old_entry->end -
3341					    old_entry->start;
3342					old_entry->cred = NULL;
3343				}
3344			}
3345
3346			/*
3347			 * Add the reference before calling vm_object_shadow
3348			 * to ensure that a shadow object is created.
3349			 */
3350			vm_object_reference(object);
3351			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3352				vm_object_shadow(&old_entry->object.vm_object,
3353				    &old_entry->offset,
3354				    old_entry->end - old_entry->start);
3355				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3356				/* Transfer the second reference too. */
3357				vm_object_reference(
3358				    old_entry->object.vm_object);
3359
3360				/*
3361				 * As in vm_map_simplify_entry(), the
3362				 * vnode lock will not be acquired in
3363				 * this call to vm_object_deallocate().
3364				 */
3365				vm_object_deallocate(object);
3366				object = old_entry->object.vm_object;
3367			}
3368			VM_OBJECT_WLOCK(object);
3369			vm_object_clear_flag(object, OBJ_ONEMAPPING);
3370			if (old_entry->cred != NULL) {
3371				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3372				object->cred = old_entry->cred;
3373				object->charge = old_entry->end - old_entry->start;
3374				old_entry->cred = NULL;
3375			}
3376
3377			/*
3378			 * Assert the correct state of the vnode
3379			 * v_writecount while the object is locked, to
3380			 * not relock it later for the assertion
3381			 * correctness.
3382			 */
3383			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3384			    object->type == OBJT_VNODE) {
3385				KASSERT(((struct vnode *)object->handle)->
3386				    v_writecount > 0,
3387				    ("vmspace_fork: v_writecount %p", object));
3388				KASSERT(object->un_pager.vnp.writemappings > 0,
3389				    ("vmspace_fork: vnp.writecount %p",
3390				    object));
3391			}
3392			VM_OBJECT_WUNLOCK(object);
3393
3394			/*
3395			 * Clone the entry, referencing the shared object.
3396			 */
3397			new_entry = vm_map_entry_create(new_map);
3398			*new_entry = *old_entry;
3399			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3400			    MAP_ENTRY_IN_TRANSITION);
3401			new_entry->wiring_thread = NULL;
3402			new_entry->wired_count = 0;
3403			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3404				vnode_pager_update_writecount(object,
3405				    new_entry->start, new_entry->end);
3406			}
3407
3408			/*
3409			 * Insert the entry into the new map -- we know we're
3410			 * inserting at the end of the new map.
3411			 */
3412			vm_map_entry_link(new_map, new_map->header.prev,
3413			    new_entry);
3414			vmspace_map_entry_forked(vm1, vm2, new_entry);
3415
3416			/*
3417			 * Update the physical map
3418			 */
3419			pmap_copy(new_map->pmap, old_map->pmap,
3420			    new_entry->start,
3421			    (old_entry->end - old_entry->start),
3422			    old_entry->start);
3423			break;
3424
3425		case VM_INHERIT_COPY:
3426			/*
3427			 * Clone the entry and link into the map.
3428			 */
3429			new_entry = vm_map_entry_create(new_map);
3430			*new_entry = *old_entry;
3431			/*
3432			 * Copied entry is COW over the old object.
3433			 */
3434			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3435			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
3436			new_entry->wiring_thread = NULL;
3437			new_entry->wired_count = 0;
3438			new_entry->object.vm_object = NULL;
3439			new_entry->cred = NULL;
3440			vm_map_entry_link(new_map, new_map->header.prev,
3441			    new_entry);
3442			vmspace_map_entry_forked(vm1, vm2, new_entry);
3443			vm_map_copy_entry(old_map, new_map, old_entry,
3444			    new_entry, fork_charge);
3445			break;
3446		}
3447		old_entry = old_entry->next;
3448	}
3449	/*
3450	 * Use inlined vm_map_unlock() to postpone handling the deferred
3451	 * map entries, which cannot be done until both old_map and
3452	 * new_map locks are released.
3453	 */
3454	sx_xunlock(&old_map->lock);
3455	sx_xunlock(&new_map->lock);
3456	vm_map_process_deferred();
3457
3458	return (vm2);
3459}
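
/*
 * Illustrative sketch (comment only, not compiled): the fork path creates
 * the child's address space and accumulates the swap charge to bill to the
 * child; "p1" is a placeholder for the parent process.
 *
 *	vm_ooffset_t fork_charge = 0;
 *
 *	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 */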
3460
3461int
3462vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3463    vm_prot_t prot, vm_prot_t max, int cow)
3464{
3465	vm_size_t growsize, init_ssize;
3466	rlim_t lmemlim, vmemlim;
3467	int rv;
3468
3469	growsize = sgrowsiz;
3470	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3471	vm_map_lock(map);
3472	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
3473	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3474	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3475		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
3476			rv = KERN_NO_SPACE;
3477			goto out;
3478		}
3479	}
3480	/* If we would blow our VMEM resource limit, no go */
3481	if (map->size + init_ssize > vmemlim) {
3482		rv = KERN_NO_SPACE;
3483		goto out;
3484	}
3485	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
3486	    max, cow);
3487out:
3488	vm_map_unlock(map);
3489	return (rv);
3490}
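
/*
 * Illustrative sketch (comment only, not compiled): exec-style creation of
 * a downward-growing main stack whose lowest address is "bos" and whose
 * maximum size is "maxssiz"; both names are placeholders.
 *
 *	rv = vm_map_stack(map, bos, maxssiz, VM_PROT_ALL, VM_PROT_ALL,
 *	    MAP_STACK_GROWS_DOWN);
 */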
3491
3492static int
3493vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3494    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
3495{
3496	vm_map_entry_t new_entry, prev_entry;
3497	vm_offset_t bot, top;
3498	vm_size_t init_ssize;
3499	int orient, rv;
3500
3501	/*
3502	 * The stack orientation is piggybacked with the cow argument.
3503	 * Extract it into orient and mask the cow argument so that we
3504	 * don't pass it around further.
3505	 * NOTE: We explicitly allow bi-directional stacks.
3506	 */
3507	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
3508	KASSERT(orient != 0, ("No stack grow direction"));
3509
3510	if (addrbos < vm_map_min(map) ||
3511	    addrbos > vm_map_max(map) ||
3512	    addrbos + max_ssize < addrbos)
3513		return (KERN_NO_SPACE);
3514
3515	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
3516
3517	/* If addr is already mapped, no go */
3518	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
3519		return (KERN_NO_SPACE);
3520
3521	/*
3522	 * If we can't accommodate max_ssize in the current mapping, no go.
3523	 * However, we need to be aware that subsequent user mappings might
3524	 * map into the space we have reserved for stack, and currently this
3525	 * space is not protected.
3526	 *
3527	 * Hopefully we will at least detect this condition when we try to
3528	 * grow the stack.
3529	 */
3530	if ((prev_entry->next != &map->header) &&
3531	    (prev_entry->next->start < addrbos + max_ssize))
3532		return (KERN_NO_SPACE);
3533
3534	/*
3535	 * We initially map a stack of only init_ssize.  We will grow as
3536	 * needed later.  Depending on the orientation of the stack (i.e.
3537	 * the grow direction) we either map at the top of the range, the
3538	 * bottom of the range or in the middle.
3539	 *
3540	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
3541	 * and cow to be 0.  Possibly we should eliminate these as input
3542	 * parameters, and just pass these values here in the insert call.
3543	 */
3544	if (orient == MAP_STACK_GROWS_DOWN)
3545		bot = addrbos + max_ssize - init_ssize;
3546	else if (orient == MAP_STACK_GROWS_UP)
3547		bot = addrbos;
3548	else
3549		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
3550	top = bot + init_ssize;
3551	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
3552
3553	/* Now set the avail_ssize amount. */
3554	if (rv == KERN_SUCCESS) {
3555		new_entry = prev_entry->next;
3556		if (new_entry->end != top || new_entry->start != bot)
3557			panic("Bad entry start/end for new stack entry");
3558
3559		new_entry->avail_ssize = max_ssize - init_ssize;
3560		KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
3561		    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
3562		    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3563		KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
3564		    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
3565		    ("new entry lacks MAP_ENTRY_GROWS_UP"));
3566	}
3567
3568	return (rv);
3569}
3570
3571static int stack_guard_page = 0;
3572SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
3573    &stack_guard_page, 0,
3574    "Insert stack guard page ahead of the growable segments.");
3575
3576/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
3577 * desired address is already mapped, or if we successfully grow
3578 * the stack.  Also returns KERN_SUCCESS if addr is outside the
3579 * stack range (this is strange, but preserves compatibility with
3580 * the grow function in vm_machdep.c).
3581 */
3582int
3583vm_map_growstack(struct proc *p, vm_offset_t addr)
3584{
3585	vm_map_entry_t next_entry, prev_entry;
3586	vm_map_entry_t new_entry, stack_entry;
3587	struct vmspace *vm = p->p_vmspace;
3588	vm_map_t map = &vm->vm_map;
3589	vm_offset_t end;
3590	vm_size_t growsize;
3591	size_t grow_amount, max_grow;
3592	rlim_t lmemlim, stacklim, vmemlim;
3593	int is_procstack, rv;
3594	struct ucred *cred;
3595#ifdef notyet
3596	uint64_t limit;
3597#endif
3598#ifdef RACCT
3599	int error;
3600#endif
3601
3602	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
3603	stacklim = lim_cur(curthread, RLIMIT_STACK);
3604	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
3605Retry:
3606
3607	vm_map_lock_read(map);
3608
3609	/* If addr is already in the entry range, no need to grow. */
3610	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
3611		vm_map_unlock_read(map);
3612		return (KERN_SUCCESS);
3613	}
3614
3615	next_entry = prev_entry->next;
3616	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
3617		/*
3618		 * This entry does not grow upwards. Since the address lies
3619		 * beyond this entry, the next entry (if one exists) has to
3620		 * be a downward growable entry. The entry list header is
3621		 * never a growable entry, so it suffices to check the flags.
3622		 */
3623		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
3624			vm_map_unlock_read(map);
3625			return (KERN_SUCCESS);
3626		}
3627		stack_entry = next_entry;
3628	} else {
3629		/*
3630		 * This entry grows upward. If the next entry does not at
3631		 * least grow downwards, this is the entry we need to grow.
3632		 * Otherwise, we have two possible choices and we have to
3633		 * select one.
3634		 */
3635		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
3636			/*
3637			 * Both entries can grow towards the address; pick
3638			 * the closer one to minimize the amount of growth.
3639			 */
3640			if (addr - prev_entry->end <= next_entry->start - addr)
3641				stack_entry = prev_entry;
3642			else
3643				stack_entry = next_entry;
3644		} else
3645			stack_entry = prev_entry;
3646	}
3647
3648	if (stack_entry == next_entry) {
3649		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN,
		    ("entry %p does not grow down", stack_entry));
3650		KASSERT(addr < stack_entry->start,
		    ("addr %#jx not below entry", (uintmax_t)addr));
3651		end = (prev_entry != &map->header) ? prev_entry->end :
3652		    stack_entry->start - stack_entry->avail_ssize;
3653		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
3654		max_grow = stack_entry->start - end;
3655	} else {
3656		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP,
		    ("entry %p does not grow up", stack_entry));
3657		KASSERT(addr >= stack_entry->end,
		    ("addr %#jx not above entry", (uintmax_t)addr));
3658		end = (next_entry != &map->header) ? next_entry->start :
3659		    stack_entry->end + stack_entry->avail_ssize;
3660		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
3661		max_grow = end - stack_entry->end;
3662	}
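	/*
	 * A concrete (purely illustrative) example of the computation above:
	 * for a downward-growing entry starting at 0x20000000 and a fault at
	 * 0x1ffffff8, grow_amount is roundup(8, PAGE_SIZE), i.e. a single
	 * page, before the sgrowsiz rounding applied further below.
	 */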
3663
3664	if (grow_amount > stack_entry->avail_ssize) {
3665		vm_map_unlock_read(map);
3666		return (KERN_NO_SPACE);
3667	}
3668
3669	/*
3670	 * If there is no longer enough space between the entries, refuse to
3671	 * grow and adjust the available space.  Note: this should only happen
3672	 * if the user has mapped into the stack area after the stack was
3673	 * created, and is probably an error.
3674	 *
3675	 * This also effectively destroys any guard page the user might have
3676	 * intended by limiting the stack size.
3677	 */
3678	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
3679		if (vm_map_lock_upgrade(map))
3680			goto Retry;
3681
3682		stack_entry->avail_ssize = max_grow;
3683
3684		vm_map_unlock(map);
3685		return (KERN_NO_SPACE);
3686	}
3687
3688	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr &&
3689	    addr < (vm_offset_t)p->p_sysent->sv_usrstack) ? 1 : 0;
3690
3691	/*
3692	 * If this is the main process stack, see if we're over the stack
3693	 * limit.
3694	 */
3695	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3696		vm_map_unlock_read(map);
3697		return (KERN_NO_SPACE);
3698	}
3699#ifdef RACCT
3700	if (racct_enable) {
3701		PROC_LOCK(p);
3702		if (is_procstack && racct_set(p, RACCT_STACK,
3703		    ctob(vm->vm_ssize) + grow_amount)) {
3704			PROC_UNLOCK(p);
3705			vm_map_unlock_read(map);
3706			return (KERN_NO_SPACE);
3707		}
3708		PROC_UNLOCK(p);
3709	}
3710#endif
3711
3712	/* Round up the grow amount to a multiple of sgrowsiz. */
3713	growsize = sgrowsiz;
3714	grow_amount = roundup(grow_amount, growsize);
3715	if (grow_amount > stack_entry->avail_ssize)
3716		grow_amount = stack_entry->avail_ssize;
3717	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
3718		grow_amount = trunc_page((vm_size_t)stacklim) -
3719		    ctob(vm->vm_ssize);
3720	}
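	/*
	 * Worked example (the sgrowsiz value is an assumption, not a
	 * guarantee): with sgrowsiz = 128 kB, a one-page fault is rounded up
	 * to a 128 kB grow_amount, which is then clamped to avail_ssize and,
	 * for the main process stack, to what RLIMIT_STACK still allows.
	 */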
3721#ifdef notyet
3722	PROC_LOCK(p);
3723	limit = racct_get_available(p, RACCT_STACK);
3724	PROC_UNLOCK(p);
3725	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
3726		grow_amount = limit - ctob(vm->vm_ssize);
3727#endif
3728	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
3729		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
3730			vm_map_unlock_read(map);
3731			rv = KERN_NO_SPACE;
3732			goto out;
3733		}
3734#ifdef RACCT
3735		if (racct_enable) {
3736			PROC_LOCK(p);
3737			if (racct_set(p, RACCT_MEMLOCK,
3738			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
3739				PROC_UNLOCK(p);
3740				vm_map_unlock_read(map);
3741				rv = KERN_NO_SPACE;
3742				goto out;
3743			}
3744			PROC_UNLOCK(p);
3745		}
3746#endif
3747	}
3748	/* If we would exceed our VMEM resource limit, refuse to grow. */
3749	if (map->size + grow_amount > vmemlim) {
3750		vm_map_unlock_read(map);
3751		rv = KERN_NO_SPACE;
3752		goto out;
3753	}
3754#ifdef RACCT
3755	if (racct_enable) {
3756		PROC_LOCK(p);
3757		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
3758			PROC_UNLOCK(p);
3759			vm_map_unlock_read(map);
3760			rv = KERN_NO_SPACE;
3761			goto out;
3762		}
3763		PROC_UNLOCK(p);
3764	}
3765#endif
3766
3767	if (vm_map_lock_upgrade(map))
3768		goto Retry;
3769
3770	if (stack_entry == next_entry) {
3771		/*
3772		 * Growing downward.
3773		 */
3774		/* Get the preliminary new entry start value */
3775		addr = stack_entry->start - grow_amount;
3776
3777		/*
3778		 * If this puts us into the previous entry, cut back our
3779		 * growth to the available space. Also, see the note above.
3780		 */
3781		if (addr < end) {
3782			stack_entry->avail_ssize = max_grow;
3783			addr = end;
3784			if (stack_guard_page)
3785				addr += PAGE_SIZE;
3786		}
3787
3788		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
3789		    next_entry->protection, next_entry->max_protection,
3790		    MAP_STACK_GROWS_DOWN);
3791
3792		/* Adjust the available stack space by the amount we grew. */
3793		if (rv == KERN_SUCCESS) {
3794			new_entry = prev_entry->next;
3795			KASSERT(new_entry == stack_entry->prev,
			    ("new entry %p not below stack entry", new_entry));
3796			KASSERT(new_entry->end == stack_entry->start,
			    ("new entry does not abut the stack entry"));
3797			KASSERT(new_entry->start == addr,
			    ("new entry does not start at grow address"));
3798			KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) !=
3799			    0, ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
3800			grow_amount = new_entry->end - new_entry->start;
3801			new_entry->avail_ssize = stack_entry->avail_ssize -
3802			    grow_amount;
3803			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
3804		}
3805	} else {
3806		/*
3807		 * Growing upward.
3808		 */
3809		addr = stack_entry->end + grow_amount;
3810
3811		/*
3812		 * If this puts us into the next entry, cut back our growth
3813		 * to the available space. Also, see the note above.
3814		 */
3815		if (addr > end) {
3816			stack_entry->avail_ssize = end - stack_entry->end;
3817			addr = end;
3818			if (stack_guard_page)
3819				addr -= PAGE_SIZE;
3820		}
3821
3822		grow_amount = addr - stack_entry->end;
3823		cred = stack_entry->cred;
3824		if (cred == NULL && stack_entry->object.vm_object != NULL)
3825			cred = stack_entry->object.vm_object->cred;
3826		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
3827			rv = KERN_NO_SPACE;
3828		/* Grow the underlying object if applicable. */
3829		else if (stack_entry->object.vm_object == NULL ||
3830			 vm_object_coalesce(stack_entry->object.vm_object,
3831			 stack_entry->offset,
3832			 (vm_size_t)(stack_entry->end - stack_entry->start),
3833			 (vm_size_t)grow_amount, cred != NULL)) {
3834			map->size += (addr - stack_entry->end);
3835			/* Update the current entry. */
3836			stack_entry->end = addr;
3837			stack_entry->avail_ssize -= grow_amount;
3838			vm_map_entry_resize_free(map, stack_entry);
3839			rv = KERN_SUCCESS;
3840		} else
3841			rv = KERN_FAILURE;
3842	}
3843
3844	if (rv == KERN_SUCCESS && is_procstack)
3845		vm->vm_ssize += btoc(grow_amount);
3846
3847	vm_map_unlock(map);
3848
3849	/*
3850	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
3851	 */
3852	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
3853		vm_map_wire(map,
3854		    (stack_entry == next_entry) ? addr : addr - grow_amount,
3855		    (stack_entry == next_entry) ? stack_entry->start : addr,
3856		    (p->p_flag & P_SYSTEM)
3857		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
3858		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
3859	}
3860
3861out:
3862#ifdef RACCT
3863	if (racct_enable && rv != KERN_SUCCESS) {
3864		PROC_LOCK(p);
3865		error = racct_set(p, RACCT_VMEM, map->size);
3866		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
3867		if (!old_mlock) {
3868			error = racct_set(p, RACCT_MEMLOCK,
3869			    ptoa(pmap_wired_count(map->pmap)));
3870			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
3871		}
3872		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
3873		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
3874		PROC_UNLOCK(p);
3875	}
3876#endif
3877
3878	return (rv);
3879}
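
/*
 * Usage sketch (hypothetical caller, not a copy of any real call site): a
 * fault-handling path would typically give the stack a chance to grow
 * before resolving the faulting address, e.g.
 *
 *	if (vm_map_growstack(p, fault_addr) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *	rv = vm_fault(map, fault_addr, fault_type, VM_FAULT_NORMAL);
 *
 * As noted in the header comment, KERN_SUCCESS is also returned when the
 * address is outside any growable entry, so the call is harmless for
 * ordinary, non-stack faults.
 */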
3880
3881/*
3882 * Unshare the specified VM space for exec.  If other processes are
3883	 * mapped to it, then create a new one.  The new vmspace has no mappings.
3884 */
3885int
3886vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
3887{
3888	struct vmspace *oldvmspace = p->p_vmspace;
3889	struct vmspace *newvmspace;
3890
3891	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
3892	    ("vmspace_exec recursed"));
3893	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
3894	if (newvmspace == NULL)
3895		return (ENOMEM);
3896	newvmspace->vm_swrss = oldvmspace->vm_swrss;
3897	/*
3898	 * This code is written this way for prototype purposes.  The
3899	 * goal is to avoid running down the vmspace here, but to let the
3900	 * other processes that are still using the vmspace finally
3901	 * run it down.  Even though there is little or no chance of blocking
3902	 * here, it is a good idea to keep this form for future mods.
3903	 */
3904	PROC_VMSPACE_LOCK(p);
3905	p->p_vmspace = newvmspace;
3906	PROC_VMSPACE_UNLOCK(p);
3907	if (p == curthread->td_proc)
3908		pmap_activate(curthread);
3909	curthread->td_pflags |= TDP_EXECVMSPC;
3910	return (0);
3911}
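
/*
 * A sketch of the deferred-teardown contract implied by TDP_EXECVMSPC (the
 * exact cleanup lives in the exec code and may differ in detail): since the
 * old vmspace is intentionally not released above, the caller is expected to
 * drop the reference once the old mappings are no longer needed, roughly
 *
 *	if ((curthread->td_pflags & TDP_EXECVMSPC) != 0) {
 *		vmspace_free(oldvmspace);
 *		curthread->td_pflags &= ~TDP_EXECVMSPC;
 *	}
 */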
3912
3913/*
3914 * Unshare the specified VM space for forcing COW.  This
3915 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3916 */
3917int
3918vmspace_unshare(struct proc *p)
3919{
3920	struct vmspace *oldvmspace = p->p_vmspace;
3921	struct vmspace *newvmspace;
3922	vm_ooffset_t fork_charge;
3923
3924	if (oldvmspace->vm_refcnt == 1)
3925		return (0);
3926	fork_charge = 0;
3927	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
3928	if (newvmspace == NULL)
3929		return (ENOMEM);
3930	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
3931		vmspace_free(newvmspace);
3932		return (ENOMEM);
3933	}
3934	PROC_VMSPACE_LOCK(p);
3935	p->p_vmspace = newvmspace;
3936	PROC_VMSPACE_UNLOCK(p);
3937	if (p == curthread->td_proc)
3938		pmap_activate(curthread);
3939	vmspace_free(oldvmspace);
3940	return (0);
3941}
3942
3943/*
3944 *	vm_map_lookup:
3945 *
3946 *	Finds the VM object, offset, and
3947 *	protection for a given virtual address in the
3948 *	specified map, assuming a page fault of the
3949 *	type specified.
3950 *
3951 *	Leaves the map in question locked for read; return
3952 *	values are guaranteed until a vm_map_lookup_done
3953 *	call is performed.  Note that the map argument
3954 *	is in/out; the returned map must be used in
3955 *	the call to vm_map_lookup_done.
3956 *
3957 *	A handle (out_entry) is returned for use in
3958 *	vm_map_lookup_done, to make that fast.
3959 *
3960 *	If a lookup is requested with "write protection"
3961 *	specified, the map may be changed to perform virtual
3962 *	copying operations, although the data referenced will
3963 *	remain the same.
3964 */
3965int
3966vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
3967	      vm_offset_t vaddr,
3968	      vm_prot_t fault_typea,
3969	      vm_map_entry_t *out_entry,	/* OUT */
3970	      vm_object_t *object,		/* OUT */
3971	      vm_pindex_t *pindex,		/* OUT */
3972	      vm_prot_t *out_prot,		/* OUT */
3973	      boolean_t *wired)			/* OUT */
3974{
3975	vm_map_entry_t entry;
3976	vm_map_t map = *var_map;
3977	vm_prot_t prot;
3978	vm_prot_t fault_type = fault_typea;
3979	vm_object_t eobject;
3980	vm_size_t size;
3981	struct ucred *cred;
3982
3983RetryLookup:;
3984
3985	vm_map_lock_read(map);
3986
3987	/*
3988	 * Lookup the faulting address.
3989	 */
3990	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
3991		vm_map_unlock_read(map);
3992		return (KERN_INVALID_ADDRESS);
3993	}
3994
3995	entry = *out_entry;
3996
3997	/*
3998	 * Handle submaps.
3999	 */
4000	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4001		vm_map_t old_map = map;
4002
4003		*var_map = map = entry->object.sub_map;
4004		vm_map_unlock_read(old_map);
4005		goto RetryLookup;
4006	}
4007
4008	/*
4009	 * Check whether this task is allowed to have this page.
4010	 */
4011	prot = entry->protection;
4012	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4013	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4014		vm_map_unlock_read(map);
4015		return (KERN_PROTECTION_FAILURE);
4016	}
4017	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4018	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4019	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4020	    ("entry %p flags %x", entry, entry->eflags));
4021	if ((fault_typea & VM_PROT_COPY) != 0 &&
4022	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4023	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4024		vm_map_unlock_read(map);
4025		return (KERN_PROTECTION_FAILURE);
4026	}
4027
4028	/*
4029	 * If this page is not pageable, we have to get it for all possible
4030	 * accesses.
4031	 */
4032	*wired = (entry->wired_count != 0);
4033	if (*wired)
4034		fault_type = entry->protection;
4035	size = entry->end - entry->start;
4036	/*
4037	 * If the entry was copy-on-write, resolve the copy now for a write or
	 * forced-copy fault, or just demote the permissions allowed for a read.
4038	 */
4039	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4040		/*
4041		 * If we want to write the page, we may as well handle that
4042		 * now since we've got the map locked.
4043		 *
4044		 * If we don't need to write the page, we just demote the
4045		 * permissions allowed.
4046		 */
4047		if ((fault_type & VM_PROT_WRITE) != 0 ||
4048		    (fault_typea & VM_PROT_COPY) != 0) {
4049			/*
4050			 * Make a new object, and place it in the object
4051			 * chain.  Note that no new references have appeared
4052			 * -- one just moved from the map to the new
4053			 * object.
4054			 */
4055			if (vm_map_lock_upgrade(map))
4056				goto RetryLookup;
4057
4058			if (entry->cred == NULL) {
4059				/*
4060				 * The debugger owner is charged for
4061				 * the memory.
4062				 */
4063				cred = curthread->td_ucred;
4064				crhold(cred);
4065				if (!swap_reserve_by_cred(size, cred)) {
4066					crfree(cred);
4067					vm_map_unlock(map);
4068					return (KERN_RESOURCE_SHORTAGE);
4069				}
4070				entry->cred = cred;
4071			}
4072			vm_object_shadow(&entry->object.vm_object,
4073			    &entry->offset, size);
4074			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4075			eobject = entry->object.vm_object;
4076			if (eobject->cred != NULL) {
4077				/*
4078				 * The object was not shadowed.
4079				 */
4080				swap_release_by_cred(size, entry->cred);
4081				crfree(entry->cred);
4082				entry->cred = NULL;
4083			} else if (entry->cred != NULL) {
4084				VM_OBJECT_WLOCK(eobject);
4085				eobject->cred = entry->cred;
4086				eobject->charge = size;
4087				VM_OBJECT_WUNLOCK(eobject);
4088				entry->cred = NULL;
4089			}
4090
4091			vm_map_lock_downgrade(map);
4092		} else {
4093			/*
4094			 * We're attempting to read a copy-on-write page --
4095			 * don't allow writes.
4096			 */
4097			prot &= ~VM_PROT_WRITE;
4098		}
4099	}
4100
4101	/*
4102	 * Create an object if necessary.
4103	 */
4104	if (entry->object.vm_object == NULL &&
4105	    !map->system_map) {
4106		if (vm_map_lock_upgrade(map))
4107			goto RetryLookup;
4108		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4109		    atop(size));
4110		entry->offset = 0;
4111		if (entry->cred != NULL) {
4112			VM_OBJECT_WLOCK(entry->object.vm_object);
4113			entry->object.vm_object->cred = entry->cred;
4114			entry->object.vm_object->charge = size;
4115			VM_OBJECT_WUNLOCK(entry->object.vm_object);
4116			entry->cred = NULL;
4117		}
4118		vm_map_lock_downgrade(map);
4119	}
4120
4121	/*
4122	 * Return the object/offset from this entry.  If the entry was
4123	 * copy-on-write or empty, it has been fixed up.
4124	 */
4125	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4126	*object = entry->object.vm_object;
4127
4128	*out_prot = prot;
4129	return (KERN_SUCCESS);
4130}
4131
4132/*
4133 *	vm_map_lookup_locked:
4134 *
4135 *	Look up the faulting address.  A version of vm_map_lookup that returns
4136 *	KERN_FAILURE instead of blocking on the map lock or memory allocation.
4137 */
4138int
4139vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
4140		     vm_offset_t vaddr,
4141		     vm_prot_t fault_typea,
4142		     vm_map_entry_t *out_entry,	/* OUT */
4143		     vm_object_t *object,	/* OUT */
4144		     vm_pindex_t *pindex,	/* OUT */
4145		     vm_prot_t *out_prot,	/* OUT */
4146		     boolean_t *wired)		/* OUT */
4147{
4148	vm_map_entry_t entry;
4149	vm_map_t map = *var_map;
4150	vm_prot_t prot;
4151	vm_prot_t fault_type = fault_typea;
4152
4153	/*
4154	 * Lookup the faulting address.
4155	 */
4156	if (!vm_map_lookup_entry(map, vaddr, out_entry))
4157		return (KERN_INVALID_ADDRESS);
4158
4159	entry = *out_entry;
4160
4161	/*
4162	 * Fail if the entry refers to a submap.
4163	 */
4164	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4165		return (KERN_FAILURE);
4166
4167	/*
4168	 * Check whether this task is allowed to have this page.
4169	 */
4170	prot = entry->protection;
4171	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4172	if ((fault_type & prot) != fault_type)
4173		return (KERN_PROTECTION_FAILURE);
4174
4175	/*
4176	 * If this page is not pageable, we have to get it for all possible
4177	 * accesses.
4178	 */
4179	*wired = (entry->wired_count != 0);
4180	if (*wired)
4181		fault_type = entry->protection;
4182
4183	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4184		/*
4185		 * Fail if the entry was copy-on-write for a write fault.
4186		 */
4187		if (fault_type & VM_PROT_WRITE)
4188			return (KERN_FAILURE);
4189		/*
4190		 * We're attempting to read a copy-on-write page --
4191		 * don't allow writes.
4192		 */
4193		prot &= ~VM_PROT_WRITE;
4194	}
4195
4196	/*
4197	 * Fail if an object should be created.
4198	 */
4199	if (entry->object.vm_object == NULL && !map->system_map)
4200		return (KERN_FAILURE);
4201
4202	/*
4203	 * Return the object/offset from this entry.  If the entry was
4204	 * copy-on-write or empty, it has been fixed up.
4205	 */
4206	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4207	*object = entry->object.vm_object;
4208
4209	*out_prot = prot;
4210	return (KERN_SUCCESS);
4211}
4212
4213/*
4214 *	vm_map_lookup_done:
4215 *
4216 *	Releases locks acquired by a vm_map_lookup
4217 *	(according to the handle returned by that lookup).
4218 */
4219void
4220vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4221{
4222	/*
4223	 * Unlock the main-level map.
4224	 */
4225	vm_map_unlock_read(map);
4226}
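
/*
 * Usage sketch (illustrative only): vm_map_lookup() and vm_map_lookup_done()
 * bracket the interval during which the returned object, offset and
 * protection remain valid, so a fault-style consumer looks roughly like
 *
 *	rv = vm_map_lookup(&map, vaddr, fault_type, &entry, &object,
 *	    &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object, pindex and prot while the map stays read-locked ...
 *	vm_map_lookup_done(map, entry);
 *
 * Because the lookup may descend into a submap, the possibly-updated map,
 * not the map originally passed in, must be handed to vm_map_lookup_done().
 */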
4227
4228#include "opt_ddb.h"
4229#ifdef DDB
4230#include <sys/kernel.h>
4231
4232#include <ddb/ddb.h>
4233
4234static void
4235vm_map_print(vm_map_t map)
4236{
4237	vm_map_entry_t entry;
4238
4239	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4240	    (void *)map,
4241	    (void *)map->pmap, map->nentries, map->timestamp);
4242
4243	db_indent += 2;
4244	for (entry = map->header.next; entry != &map->header;
4245	    entry = entry->next) {
4246		db_iprintf("map entry %p: start=%p, end=%p\n",
4247		    (void *)entry, (void *)entry->start, (void *)entry->end);
4248		{
4249			static char *inheritance_name[4] =
4250			{"share", "copy", "none", "donate_copy"};
4251
4252			db_iprintf(" prot=%x/%x/%s",
4253			    entry->protection,
4254			    entry->max_protection,
4255			    inheritance_name[(int)(unsigned char)entry->inheritance]);
4256			if (entry->wired_count != 0)
4257				db_printf(", wired");
4258		}
4259		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4260			db_printf(", share=%p, offset=0x%jx\n",
4261			    (void *)entry->object.sub_map,
4262			    (uintmax_t)entry->offset);
4263			if ((entry->prev == &map->header) ||
4264			    (entry->prev->object.sub_map !=
4265				entry->object.sub_map)) {
4266				db_indent += 2;
4267				vm_map_print((vm_map_t)entry->object.sub_map);
4268				db_indent -= 2;
4269			}
4270		} else {
4271			if (entry->cred != NULL)
4272				db_printf(", ruid %d", entry->cred->cr_ruid);
4273			db_printf(", object=%p, offset=0x%jx",
4274			    (void *)entry->object.vm_object,
4275			    (uintmax_t)entry->offset);
4276			if (entry->object.vm_object && entry->object.vm_object->cred)
4277				db_printf(", obj ruid %d charge %jx",
4278				    entry->object.vm_object->cred->cr_ruid,
4279				    (uintmax_t)entry->object.vm_object->charge);
4280			if (entry->eflags & MAP_ENTRY_COW)
4281				db_printf(", copy (%s)",
4282				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4283			db_printf("\n");
4284
4285			if ((entry->prev == &map->header) ||
4286			    (entry->prev->object.vm_object !=
4287				entry->object.vm_object)) {
4288				db_indent += 2;
4289				vm_object_print((db_expr_t)(intptr_t)
4290						entry->object.vm_object,
4291						0, 0, (char *)0);
4292				db_indent -= 2;
4293			}
4294		}
4295	}
4296	db_indent -= 2;
4297}
4298
4299DB_SHOW_COMMAND(map, map)
4300{
4301
4302	if (!have_addr) {
4303		db_printf("usage: show map <addr>\n");
4304		return;
4305	}
4306	vm_map_print((vm_map_t)addr);
4307}
4308
4309DB_SHOW_COMMAND(procvm, procvm)
4310{
4311	struct proc *p;
4312
4313	if (have_addr) {
4314		p = (struct proc *) addr;
4315	} else {
4316		p = curproc;
4317	}
4318
4319	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4320	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4321	    (void *)vmspace_pmap(p->p_vmspace));
4322
4323	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4324}
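
/*
 * Example ddb(4) usage (the address is illustrative):
 *
 *	db> show procvm
 *	db> show map 0xfffff80003d5e000
 *
 * "show procvm" defaults to curproc when no address is given, while
 * "show map" requires the address of a vm_map.
 */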
4325
4326#endif /* DDB */
4327